ucc_geth.c

/*
 * Copyright (C) 2006-2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "ucc_geth_mii.h"

#undef DEBUG
#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT	(NETIF_MSG_IFUP << 1 ) - 1

static DEFINE_SPINLOCK(ugeth_lock);

static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN},
	.bdRingLenRx = {
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

#ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
#endif /* CONFIG_UGETH_FILTERING */

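/* Remove and return the first node of a list, or NULL if it is empty;
 * the driver-wide ugeth_lock guards the list manipulation.
 */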
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;

		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}

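/* Allocate and DMA-map a receive skb for the given Rx BD, then mark the BD
 * empty (R_E) with interrupt-on-completion (R_I), preserving only its wrap
 * bit (R_W).
 */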
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->dev;

	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(&ugeth->dev->dev,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));

	return skb;
}

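/* Attach a freshly allocated skb to every BD of one Rx ring; the loop ends
 * after the BD that carries the wrap bit (R_W), i.e. the last ring entry.
 */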
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}

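/* Build the init-enet parameter entries for a set of Tx/Rx threads: each
 * 32-bit entry combines a QE serial number (SNUM), the MURAM offset of the
 * thread's parameter page and the requested RISC allocation.
 */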
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}

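/* Undo fill_init_enet_entries(): for every entry still marked with the given
 * RISC allocation, release its SNUM and MURAM page and clear the entry.
 */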
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    enum qe_risc_allocation risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*p_start++ = 0;
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_UGETH_FILTERING
static struct enet_addr_container *get_enet_addr_container(void)
{
	struct enet_addr_container *enet_addr_cont;

	/* allocate memory */
	enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
	if (!enet_addr_cont) {
		ugeth_err("%s: No memory for enet_addr_container object.",
			  __func__);
		return NULL;
	}

	return enet_addr_cont;
}
#endif /* CONFIG_UGETH_FILTERING */

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}

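/* Write a MAC address into three 16-bit registers in reversed byte order,
 * as the 82xx-style address-filtering parameter RAM expects.
 */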
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}

#ifdef CONFIG_UGETH_FILTERING
static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Ethernet frames are defined in Little Endian mode, */
	/* therefore to insert the address we reverse the bytes. */
	set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);

	return 0;
}
#endif /* CONFIG_UGETH_FILTERING */

static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode, therefore to
	 * insert the address into the hash (Big Endian mode), we reverse
	 * the bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

#ifdef CONFIG_UGETH_MAGIC_PACKET
static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth __iomem *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Enable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm |= UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Enable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 |= MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}

static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth __iomem *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Disable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm &= ~UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Disable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}
#endif /* MAGIC_PACKET */

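/* Compare two Ethernet addresses; returns 0 when they match (memcmp
 * semantics).
 */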
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}

#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;

		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	gathers hardware statistics */
	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));
	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
			   ugeth->p_tx_glbl_pram->iphoffset[0]);
		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
			   ugeth->p_tx_glbl_pram->iphoffset[1]);
		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
			   ugeth->p_tx_glbl_pram->iphoffset[2]);
		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
			   ugeth->p_tx_glbl_pram->iphoffset[3]);
		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
			   ugeth->p_tx_glbl_pram->iphoffset[4]);
		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
			   ugeth->p_tx_glbl_pram->iphoffset[5]);
		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
			   ugeth->p_tx_glbl_pram->iphoffset[6]);
		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
			   ugeth->p_tx_glbl_pram->iphoffset[7]);
		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
  920. if (ugeth->p_send_q_mem_reg) {
  921. ugeth_info("Send Q memory registers:");
  922. ugeth_info("Base address: 0x%08x",
  923. (u32) ugeth->p_send_q_mem_reg);
  924. for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
  925. ugeth_info("SQQD[%d]:", i);
  926. ugeth_info("Base address: 0x%08x",
  927. (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
  928. mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
  929. sizeof(struct ucc_geth_send_queue_qd));
  930. }
  931. }
  932. if (ugeth->p_scheduler) {
  933. ugeth_info("Scheduler:");
  934. ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
  935. mem_disp((u8 *) ugeth->p_scheduler,
  936. sizeof(*ugeth->p_scheduler));
  937. }
  938. if (ugeth->p_tx_fw_statistics_pram) {
  939. ugeth_info("TX FW statistics pram:");
  940. ugeth_info("Base address: 0x%08x",
  941. (u32) ugeth->p_tx_fw_statistics_pram);
  942. mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
  943. sizeof(*ugeth->p_tx_fw_statistics_pram));
  944. }
  945. if (ugeth->p_rx_fw_statistics_pram) {
  946. ugeth_info("RX FW statistics pram:");
  947. ugeth_info("Base address: 0x%08x",
  948. (u32) ugeth->p_rx_fw_statistics_pram);
  949. mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
  950. sizeof(*ugeth->p_rx_fw_statistics_pram));
  951. }
  952. if (ugeth->p_rx_irq_coalescing_tbl) {
  953. ugeth_info("RX IRQ coalescing tables:");
  954. ugeth_info("Base address: 0x%08x",
  955. (u32) ugeth->p_rx_irq_coalescing_tbl);
  956. for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
  957. ugeth_info("RX IRQ coalescing table entry[%d]:", i);
  958. ugeth_info("Base address: 0x%08x",
  959. (u32) & ugeth->p_rx_irq_coalescing_tbl->
  960. coalescingentry[i]);
  961. ugeth_info
  962. ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
  963. (u32) & ugeth->p_rx_irq_coalescing_tbl->
  964. coalescingentry[i].interruptcoalescingmaxvalue,
  965. in_be32(&ugeth->p_rx_irq_coalescing_tbl->
  966. coalescingentry[i].
  967. interruptcoalescingmaxvalue));
  968. ugeth_info
  969. ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
  970. (u32) & ugeth->p_rx_irq_coalescing_tbl->
  971. coalescingentry[i].interruptcoalescingcounter,
  972. in_be32(&ugeth->p_rx_irq_coalescing_tbl->
  973. coalescingentry[i].
  974. interruptcoalescingcounter));
  975. }
  976. }
  977. if (ugeth->p_rx_bd_qs_tbl) {
  978. ugeth_info("RX BD QS tables:");
  979. ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
  980. for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
  981. ugeth_info("RX BD QS table[%d]:", i);
  982. ugeth_info("Base address: 0x%08x",
  983. (u32) & ugeth->p_rx_bd_qs_tbl[i]);
  984. ugeth_info
  985. ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
  986. (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
  987. in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
  988. ugeth_info
  989. ("bdptr : addr - 0x%08x, val - 0x%08x",
  990. (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
  991. in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
  992. ugeth_info
  993. ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
  994. (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
  995. in_be32(&ugeth->p_rx_bd_qs_tbl[i].
  996. externalbdbaseptr));
  997. ugeth_info
  998. ("externalbdptr : addr - 0x%08x, val - 0x%08x",
  999. (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
  1000. in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
  1001. ugeth_info("ucode RX Prefetched BDs:");
  1002. ugeth_info("Base address: 0x%08x",
  1003. (u32)
  1004. qe_muram_addr(in_be32
  1005. (&ugeth->p_rx_bd_qs_tbl[i].
  1006. bdbaseptr)));
  1007. mem_disp((u8 *)
  1008. qe_muram_addr(in_be32
  1009. (&ugeth->p_rx_bd_qs_tbl[i].
  1010. bdbaseptr)),
  1011. sizeof(struct ucc_geth_rx_prefetched_bds));
  1012. }
  1013. }
  1014. if (ugeth->p_init_enet_param_shadow) {
  1015. int size;
  1016. ugeth_info("Init enet param shadow:");
  1017. ugeth_info("Base address: 0x%08x",
  1018. (u32) ugeth->p_init_enet_param_shadow);
  1019. mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
  1020. sizeof(*ugeth->p_init_enet_param_shadow));
  1021. size = sizeof(struct ucc_geth_thread_rx_pram);
  1022. if (ugeth->ug_info->rxExtendedFiltering) {
  1023. size +=
  1024. THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
  1025. if (ugeth->ug_info->largestexternallookupkeysize ==
  1026. QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
  1027. size +=
  1028. THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
  1029. if (ugeth->ug_info->largestexternallookupkeysize ==
  1030. QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
  1031. size +=
  1032. THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
  1033. }
  1034. dump_init_enet_entries(ugeth,
  1035. &(ugeth->p_init_enet_param_shadow->
  1036. txthread[0]),
  1037. ENET_INIT_PARAM_MAX_ENTRIES_TX,
  1038. sizeof(struct ucc_geth_thread_tx_pram),
  1039. ugeth->ug_info->riscTx, 0);
  1040. dump_init_enet_entries(ugeth,
  1041. &(ugeth->p_init_enet_param_shadow->
  1042. rxthread[0]),
  1043. ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
  1044. ugeth->ug_info->riscRx, 1);
  1045. }
  1046. }
  1047. #endif /* DEBUG */
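/* Program UPSMR, MACCFG1 and MACCFG2 with the driver's default initial values. */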
  1048. static void init_default_reg_vals(u32 __iomem *upsmr_register,
  1049. u32 __iomem *maccfg1_register,
  1050. u32 __iomem *maccfg2_register)
  1051. {
  1052. out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
  1053. out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
  1054. out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
  1055. }
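/* Build and write the HAFDUP register: validate the alternate BEB truncation,
 * maximum retransmission and collision window limits, then OR in the
 * requested half-duplex option bits.
 */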
  1056. static int init_half_duplex_params(int alt_beb,
  1057. int back_pressure_no_backoff,
  1058. int no_backoff,
  1059. int excess_defer,
  1060. u8 alt_beb_truncation,
  1061. u8 max_retransmissions,
  1062. u8 collision_window,
  1063. u32 __iomem *hafdup_register)
  1064. {
  1065. u32 value = 0;
  1066. if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
  1067. (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
  1068. (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
  1069. return -EINVAL;
  1070. value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
  1071. if (alt_beb)
  1072. value |= HALFDUP_ALT_BEB;
  1073. if (back_pressure_no_backoff)
  1074. value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
  1075. if (no_backoff)
  1076. value |= HALFDUP_NO_BACKOFF;
  1077. if (excess_defer)
  1078. value |= HALFDUP_EXCESSIVE_DEFER;
  1079. value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
  1080. value |= collision_window;
  1081. out_be32(hafdup_register, value);
  1082. return 0;
  1083. }
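/* Build and write the IPGIFG register from the four inter-frame gap
 * parameters, rejecting values that exceed the individual field limits.
 */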
  1084. static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
  1085. u8 non_btb_ipg,
  1086. u8 min_ifg,
  1087. u8 btb_ipg,
  1088. u32 __iomem *ipgifg_register)
  1089. {
  1090. u32 value = 0;
  1091. /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
  1092. IPG part 2 */
  1093. if (non_btb_cs_ipg > non_btb_ipg)
  1094. return -EINVAL;
  1095. if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
  1096. (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
  1097. /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
  1098. (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
  1099. return -EINVAL;
  1100. value |=
  1101. ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
  1102. IPGIFG_NBTB_CS_IPG_MASK);
  1103. value |=
  1104. ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
  1105. IPGIFG_NBTB_IPG_MASK);
  1106. value |=
  1107. ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
  1108. IPGIFG_MIN_IFG_MASK);
  1109. value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
  1110. out_be32(ipgifg_register, value);
  1111. return 0;
  1112. }
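/* Program flow control: the pause period and extension field go into UEMPR,
 * the automatic flow control mode is ORed into UPSMR, and the Rx/Tx flow
 * control enables are set in MACCFG1.
 */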
  1113. int init_flow_control_params(u32 automatic_flow_control_mode,
  1114. int rx_flow_control_enable,
  1115. int tx_flow_control_enable,
  1116. u16 pause_period,
  1117. u16 extension_field,
  1118. u32 __iomem *upsmr_register,
  1119. u32 __iomem *uempr_register,
  1120. u32 __iomem *maccfg1_register)
  1121. {
  1122. u32 value = 0;
  1123. /* Set UEMPR register */
  1124. value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
  1125. value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
  1126. out_be32(uempr_register, value);
  1127. /* Set UPSMR register */
  1128. value = in_be32(upsmr_register);
  1129. value |= automatic_flow_control_mode;
  1130. out_be32(upsmr_register, value);
  1131. value = in_be32(maccfg1_register);
  1132. if (rx_flow_control_enable)
  1133. value |= MACCFG1_FLOW_RX;
  1134. if (tx_flow_control_enable)
  1135. value |= MACCFG1_FLOW_TX;
  1136. out_be32(maccfg1_register, value);
  1137. return 0;
  1138. }
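/* Enable hardware statistics gathering (UPSMR_HSE) if requested, clear the
 * hardware counters and optionally make them auto-zero on read (UESCR).
 */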
  1139. static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
  1140. int auto_zero_hardware_statistics,
  1141. u32 __iomem *upsmr_register,
  1142. u16 __iomem *uescr_register)
  1143. {
  1144. u32 upsmr_value = 0;
  1145. u16 uescr_value = 0;
  1146. /* Enable hardware statistics gathering if requested */
  1147. if (enable_hardware_statistics) {
  1148. upsmr_value = in_be32(upsmr_register);
  1149. upsmr_value |= UPSMR_HSE;
  1150. out_be32(upsmr_register, upsmr_value);
  1151. }
  1152. /* Clear hardware statistics counters */
  1153. uescr_value = in_be16(uescr_register);
  1154. uescr_value |= UESCR_CLRCNT;
  1155. /* Automatically zero hardware statistics counters on read,
  1156. if requested */
  1157. if (auto_zero_hardware_statistics)
  1158. uescr_value |= UESCR_AUTOZ;
  1159. out_be16(uescr_register, uescr_value);
  1160. return 0;
  1161. }
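/* Point the Tx/Rx RMON base registers at the firmware statistics structures
 * and enable RMON statistics gathering in TEMODER/REMODER as requested.
 */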
  1162. static int init_firmware_statistics_gathering_mode(int
  1163. enable_tx_firmware_statistics,
  1164. int enable_rx_firmware_statistics,
  1165. u32 __iomem *tx_rmon_base_ptr,
  1166. u32 tx_firmware_statistics_structure_address,
  1167. u32 __iomem *rx_rmon_base_ptr,
  1168. u32 rx_firmware_statistics_structure_address,
  1169. u16 __iomem *temoder_register,
  1170. u32 __iomem *remoder_register)
  1171. {
  1172. /* Note: this function does not check if */
  1173. /* the parameters it receives are NULL */
  1174. u16 temoder_value;
  1175. u32 remoder_value;
  1176. if (enable_tx_firmware_statistics) {
  1177. out_be32(tx_rmon_base_ptr,
  1178. tx_firmware_statistics_structure_address);
  1179. temoder_value = in_be16(temoder_register);
  1180. temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
  1181. out_be16(temoder_register, temoder_value);
  1182. }
  1183. if (enable_rx_firmware_statistics) {
  1184. out_be32(rx_rmon_base_ptr,
  1185. rx_firmware_statistics_structure_address);
  1186. remoder_value = in_be32(remoder_register);
  1187. remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
  1188. out_be32(remoder_register, remoder_value);
  1189. }
  1190. return 0;
  1191. }
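/* Write the six station address bytes into MACSTNADDR1/MACSTNADDR2 in the
 * byte order expected by the hardware (see the layout comments below).
 */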
  1192. static int init_mac_station_addr_regs(u8 address_byte_0,
  1193. u8 address_byte_1,
  1194. u8 address_byte_2,
  1195. u8 address_byte_3,
  1196. u8 address_byte_4,
  1197. u8 address_byte_5,
  1198. u32 __iomem *macstnaddr1_register,
  1199. u32 __iomem *macstnaddr2_register)
  1200. {
  1201. u32 value = 0;
  1202. /* Example: for a station address of 0x12345678ABCD, */
  1203. /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
  1204. /* MACSTNADDR1 Register: */
  1205. /* 0 7 8 15 */
  1206. /* station address byte 5 station address byte 4 */
  1207. /* 16 23 24 31 */
  1208. /* station address byte 3 station address byte 2 */
  1209. value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
  1210. value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
  1211. value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
  1212. value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
  1213. out_be32(macstnaddr1_register, value);
  1214. /* MACSTNADDR2 Register: */
  1215. /* 0 7 8 15 */
  1216. /* station address byte 1 station address byte 0 */
  1217. /* 16 23 24 31 */
  1218. /* reserved reserved */
  1219. value = 0;
  1220. value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
  1221. value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
  1222. out_be32(macstnaddr2_register, value);
  1223. return 0;
  1224. }
  1225. static int init_check_frame_length_mode(int length_check,
  1226. u32 __iomem *maccfg2_register)
  1227. {
  1228. u32 value = 0;
  1229. value = in_be32(maccfg2_register);
  1230. if (length_check)
  1231. value |= MACCFG2_LC;
  1232. else
  1233. value &= ~MACCFG2_LC;
  1234. out_be32(maccfg2_register, value);
  1235. return 0;
  1236. }
  1237. static int init_preamble_length(u8 preamble_length,
  1238. u32 __iomem *maccfg2_register)
  1239. {
  1240. u32 value = 0;
  1241. if ((preamble_length < 3) || (preamble_length > 7))
  1242. return -EINVAL;
  1243. value = in_be32(maccfg2_register);
  1244. value &= ~MACCFG2_PREL_MASK;
  1245. value |= (preamble_length << MACCFG2_PREL_SHIFT);
  1246. out_be32(maccfg2_register, value);
  1247. return 0;
  1248. }
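/* Set the broadcast-reject, receive-short-frames and promiscuous bits in
 * UPSMR according to the requested Rx parameters.
 */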
  1249. static int init_rx_parameters(int reject_broadcast,
  1250. int receive_short_frames,
  1251. int promiscuous, u32 __iomem *upsmr_register)
  1252. {
  1253. u32 value = 0;
  1254. value = in_be32(upsmr_register);
  1255. if (reject_broadcast)
  1256. value |= UPSMR_BRO;
  1257. else
  1258. value &= ~UPSMR_BRO;
  1259. if (receive_short_frames)
  1260. value |= UPSMR_RSH;
  1261. else
  1262. value &= ~UPSMR_RSH;
  1263. if (promiscuous)
  1264. value |= UPSMR_PRO;
  1265. else
  1266. value &= ~UPSMR_PRO;
  1267. out_be32(upsmr_register, value);
  1268. return 0;
  1269. }
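/* Program MRBLR; the maximum Rx buffer length must be a non-zero multiple
 * of UCC_GETH_MRBLR_ALIGNMENT (128).
 */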
  1270. static int init_max_rx_buff_len(u16 max_rx_buf_len,
  1271. u16 __iomem *mrblr_register)
  1272. {
  1273. /* max_rx_buf_len value must be a multiple of 128 */
  1274. if ((max_rx_buf_len == 0)
  1275. || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
  1276. return -EINVAL;
  1277. out_be16(mrblr_register, max_rx_buf_len);
  1278. return 0;
  1279. }
  1280. static int init_min_frame_len(u16 min_frame_length,
  1281. u16 __iomem *minflr_register,
  1282. u16 __iomem *mrblr_register)
  1283. {
  1284. u16 mrblr_value = 0;
  1285. mrblr_value = in_be16(mrblr_register);
  1286. if (min_frame_length >= (mrblr_value - 4))
  1287. return -EINVAL;
  1288. out_be16(minflr_register, min_frame_length);
  1289. return 0;
  1290. }
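/* Configure the MAC for the attached PHY: select nibble/byte interface mode
 * in MACCFG2 from the maximum speed, set the reduced-mode/TBI bits in UPSMR,
 * disable TBI autonegotiation where applicable, and program frame length
 * checking and the preamble length.
 */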
  1291. static int adjust_enet_interface(struct ucc_geth_private *ugeth)
  1292. {
  1293. struct ucc_geth_info *ug_info;
  1294. struct ucc_geth __iomem *ug_regs;
  1295. struct ucc_fast __iomem *uf_regs;
  1296. int ret_val;
  1297. u32 upsmr, maccfg2, tbiBaseAddress;
  1298. u16 value;
  1299. ugeth_vdbg("%s: IN", __func__);
  1300. ug_info = ugeth->ug_info;
  1301. ug_regs = ugeth->ug_regs;
  1302. uf_regs = ugeth->uccf->uf_regs;
  1303. /* Set MACCFG2 */
  1304. maccfg2 = in_be32(&ug_regs->maccfg2);
  1305. maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
  1306. if ((ugeth->max_speed == SPEED_10) ||
  1307. (ugeth->max_speed == SPEED_100))
  1308. maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
  1309. else if (ugeth->max_speed == SPEED_1000)
  1310. maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
  1311. maccfg2 |= ug_info->padAndCrc;
  1312. out_be32(&ug_regs->maccfg2, maccfg2);
  1313. /* Set UPSMR */
  1314. upsmr = in_be32(&uf_regs->upsmr);
  1315. upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
  1316. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
  1317. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
  1318. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
  1319. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
  1320. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
  1321. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1322. upsmr |= UPSMR_RPM;
  1323. switch (ugeth->max_speed) {
  1324. case SPEED_10:
  1325. upsmr |= UPSMR_R10M;
  1326. /* FALLTHROUGH */
  1327. case SPEED_100:
  1328. if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
  1329. upsmr |= UPSMR_RMM;
  1330. }
  1331. }
  1332. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
  1333. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1334. upsmr |= UPSMR_TBIM;
  1335. }
  1336. out_be32(&uf_regs->upsmr, upsmr);
  1337. /* Disable autonegotiation in tbi mode, because by default it
  1338. comes up in autonegotiation mode. */
  1339. /* Note that this depends on proper setting in utbipar register. */
  1340. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
  1341. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1342. tbiBaseAddress = in_be32(&ug_regs->utbipar);
  1343. tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
  1344. tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
  1345. value = ugeth->phydev->bus->read(ugeth->phydev->bus,
  1346. (u8) tbiBaseAddress, ENET_TBI_MII_CR);
  1347. value &= ~0x1000; /* Turn off autonegotiation */
  1348. ugeth->phydev->bus->write(ugeth->phydev->bus,
  1349. (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
  1350. }
  1351. init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
  1352. ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
  1353. if (ret_val != 0) {
  1354. if (netif_msg_probe(ugeth))
  1355. ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
  1356. __func__);
  1357. return ret_val;
  1358. }
  1359. return 0;
  1360. }
  1361. /* Called every time the controller might need to be made
  1362. * aware of new link state. The PHY code conveys this
  1363. * information through variables in the ugeth structure, and this
  1364. * function converts those variables into the appropriate
  1365. * register values, and can bring down the device if needed.
  1366. */
  1367. static void adjust_link(struct net_device *dev)
  1368. {
  1369. struct ucc_geth_private *ugeth = netdev_priv(dev);
  1370. struct ucc_geth __iomem *ug_regs;
  1371. struct ucc_fast __iomem *uf_regs;
  1372. struct phy_device *phydev = ugeth->phydev;
  1373. unsigned long flags;
  1374. int new_state = 0;
  1375. ug_regs = ugeth->ug_regs;
  1376. uf_regs = ugeth->uccf->uf_regs;
  1377. spin_lock_irqsave(&ugeth->lock, flags);
  1378. if (phydev->link) {
  1379. u32 tempval = in_be32(&ug_regs->maccfg2);
  1380. u32 upsmr = in_be32(&uf_regs->upsmr);
  1381. /* Now we make sure that we can be in full duplex mode.
  1382. * If not, we operate in half-duplex mode. */
  1383. if (phydev->duplex != ugeth->oldduplex) {
  1384. new_state = 1;
  1385. if (!(phydev->duplex))
  1386. tempval &= ~(MACCFG2_FDX);
  1387. else
  1388. tempval |= MACCFG2_FDX;
  1389. ugeth->oldduplex = phydev->duplex;
  1390. }
  1391. if (phydev->speed != ugeth->oldspeed) {
  1392. new_state = 1;
  1393. switch (phydev->speed) {
  1394. case SPEED_1000:
  1395. tempval = ((tempval &
  1396. ~(MACCFG2_INTERFACE_MODE_MASK)) |
  1397. MACCFG2_INTERFACE_MODE_BYTE);
  1398. break;
  1399. case SPEED_100:
  1400. case SPEED_10:
  1401. tempval = ((tempval &
  1402. ~(MACCFG2_INTERFACE_MODE_MASK)) |
  1403. MACCFG2_INTERFACE_MODE_NIBBLE);
  1404. /* if reduced mode, re-set UPSMR.R10M */
  1405. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
  1406. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
  1407. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
  1408. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
  1409. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
  1410. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1411. if (phydev->speed == SPEED_10)
  1412. upsmr |= UPSMR_R10M;
  1413. else
  1414. upsmr &= ~(UPSMR_R10M);
  1415. }
  1416. break;
  1417. default:
  1418. if (netif_msg_link(ugeth))
  1419. ugeth_warn(
  1420. "%s: Ack! Speed (%d) is not 10/100/1000!",
  1421. dev->name, phydev->speed);
  1422. break;
  1423. }
  1424. ugeth->oldspeed = phydev->speed;
  1425. }
  1426. out_be32(&ug_regs->maccfg2, tempval);
  1427. out_be32(&uf_regs->upsmr, upsmr);
  1428. if (!ugeth->oldlink) {
  1429. new_state = 1;
  1430. ugeth->oldlink = 1;
  1431. }
  1432. } else if (ugeth->oldlink) {
  1433. new_state = 1;
  1434. ugeth->oldlink = 0;
  1435. ugeth->oldspeed = 0;
  1436. ugeth->oldduplex = -1;
  1437. }
  1438. if (new_state && netif_msg_link(ugeth))
  1439. phy_print_status(phydev);
  1440. spin_unlock_irqrestore(&ugeth->lock, flags);
  1441. }
1442. /* Configure the PHY for dev.
1443. * Returns 0 on success, or a negative errno (from PTR_ERR) on failure.
1444. */
  1445. static int init_phy(struct net_device *dev)
  1446. {
  1447. struct ucc_geth_private *priv = netdev_priv(dev);
  1448. struct phy_device *phydev;
  1449. char phy_id[BUS_ID_SIZE];
  1450. priv->oldlink = 0;
  1451. priv->oldspeed = 0;
  1452. priv->oldduplex = -1;
  1453. snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus,
  1454. priv->ug_info->phy_address);
  1455. phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
  1456. if (IS_ERR(phydev)) {
1457. printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
  1458. return PTR_ERR(phydev);
  1459. }
1460. phydev->supported &= (SUPPORTED_10baseT_Half |
1461. SUPPORTED_10baseT_Full |
1462. SUPPORTED_100baseT_Half |
1463. SUPPORTED_100baseT_Full);
1464. if (priv->max_speed == SPEED_1000)
1465. phydev->supported |= SUPPORTED_1000baseT_Full;
  1466. phydev->advertising = phydev->supported;
  1467. priv->phydev = phydev;
  1468. return 0;
  1469. }
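/* Issue the QE GRACEFUL_STOP_TX host command and busy-wait until the
 * controller acknowledges it with the GRA event bit.
 */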
  1470. static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
  1471. {
  1472. struct ucc_fast_private *uccf;
  1473. u32 cecr_subblock;
  1474. u32 temp;
  1475. uccf = ugeth->uccf;
  1476. /* Mask GRACEFUL STOP TX interrupt bit and clear it */
  1477. temp = in_be32(uccf->p_uccm);
  1478. temp &= ~UCCE_GRA;
  1479. out_be32(uccf->p_uccm, temp);
  1480. out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
  1481. /* Issue host command */
  1482. cecr_subblock =
  1483. ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
  1484. qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
  1485. QE_CR_PROTOCOL_ETHERNET, 0);
  1486. /* Wait for command to complete */
  1487. do {
  1488. temp = in_be32(uccf->p_ucce);
  1489. } while (!(temp & UCCE_GRA));
  1490. uccf->stopped_tx = 1;
  1491. return 0;
  1492. }
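/* Issue the QE GRACEFUL_STOP_RX host command repeatedly until the
 * acknowledge bit appears in rxgstpack, as the spec requires.
 */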
  1493. static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
  1494. {
  1495. struct ucc_fast_private *uccf;
  1496. u32 cecr_subblock;
  1497. u8 temp;
  1498. uccf = ugeth->uccf;
  1499. /* Clear acknowledge bit */
  1500. temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
  1501. temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
  1502. out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
  1503. /* Keep issuing command and checking acknowledge bit until
  1504. it is asserted, according to spec */
  1505. do {
  1506. /* Issue host command */
  1507. cecr_subblock =
  1508. ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
  1509. ucc_num);
  1510. qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
  1511. QE_CR_PROTOCOL_ETHERNET, 0);
  1512. temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
  1513. } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
  1514. uccf->stopped_rx = 1;
  1515. return 0;
  1516. }
  1517. static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
  1518. {
  1519. struct ucc_fast_private *uccf;
  1520. u32 cecr_subblock;
  1521. uccf = ugeth->uccf;
  1522. cecr_subblock =
  1523. ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
  1524. qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
  1525. uccf->stopped_tx = 0;
  1526. return 0;
  1527. }
  1528. static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
  1529. {
  1530. struct ucc_fast_private *uccf;
  1531. u32 cecr_subblock;
  1532. uccf = ugeth->uccf;
  1533. cecr_subblock =
  1534. ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
  1535. qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
  1536. 0);
  1537. uccf->stopped_rx = 0;
  1538. return 0;
  1539. }
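/* Enable Tx and/or Rx: restart any direction that was gracefully stopped,
 * then enable the fast UCC for the requested directions.
 */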
  1540. static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
  1541. {
  1542. struct ucc_fast_private *uccf;
  1543. int enabled_tx, enabled_rx;
  1544. uccf = ugeth->uccf;
  1545. /* check if the UCC number is in range. */
  1546. if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
  1547. if (netif_msg_probe(ugeth))
  1548. ugeth_err("%s: ucc_num out of range.", __func__);
  1549. return -EINVAL;
  1550. }
  1551. enabled_tx = uccf->enabled_tx;
  1552. enabled_rx = uccf->enabled_rx;
  1553. /* Get Tx and Rx going again, in case this channel was actively
  1554. disabled. */
  1555. if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
  1556. ugeth_restart_tx(ugeth);
  1557. if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
  1558. ugeth_restart_rx(ugeth);
  1559. ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
  1560. return 0;
  1561. }
  1562. static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
  1563. {
  1564. struct ucc_fast_private *uccf;
  1565. uccf = ugeth->uccf;
  1566. /* check if the UCC number is in range. */
  1567. if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
  1568. if (netif_msg_probe(ugeth))
  1569. ugeth_err("%s: ucc_num out of range.", __func__);
  1570. return -EINVAL;
  1571. }
  1572. /* Stop any transmissions */
  1573. if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
  1574. ugeth_graceful_stop_tx(ugeth);
  1575. /* Stop any receptions */
  1576. if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
  1577. ugeth_graceful_stop_rx(ugeth);
  1578. ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
  1579. return 0;
  1580. }
  1581. static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
  1582. {
  1583. #ifdef DEBUG
  1584. ucc_fast_dump_regs(ugeth->uccf);
  1585. dump_regs(ugeth);
  1586. dump_bds(ugeth);
  1587. #endif
  1588. }
  1589. #ifdef CONFIG_UGETH_FILTERING
  1590. static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
  1591. p_UccGethTadParams,
  1592. struct qe_fltr_tad *qe_fltr_tad)
  1593. {
  1594. u16 temp;
  1595. /* Zero serialized TAD */
  1596. memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
  1597. qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
  1598. if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
  1599. (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
  1600. || (p_UccGethTadParams->vnontag_op !=
  1601. UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
  1602. )
  1603. qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
  1604. if (p_UccGethTadParams->reject_frame)
  1605. qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
  1606. temp =
  1607. (u16) (((u16) p_UccGethTadParams->
  1608. vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
  1609. qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
  1610. qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
  1611. if (p_UccGethTadParams->vnontag_op ==
  1612. UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
  1613. qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
  1614. qe_fltr_tad->serialized[1] |=
  1615. p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
  1616. qe_fltr_tad->serialized[2] |=
  1617. p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
  1618. /* upper bits */
  1619. qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
  1620. /* lower bits */
  1621. qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
  1622. return 0;
  1623. }
1624. static struct enet_addr_container
  1625. *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
  1626. struct enet_addr *p_enet_addr)
  1627. {
  1628. struct enet_addr_container *enet_addr_cont;
  1629. struct list_head *p_lh;
  1630. u16 i, num;
  1631. int32_t j;
  1632. u8 *p_counter;
  1633. if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
  1634. p_lh = &ugeth->group_hash_q;
  1635. p_counter = &(ugeth->numGroupAddrInHash);
  1636. } else {
  1637. p_lh = &ugeth->ind_hash_q;
  1638. p_counter = &(ugeth->numIndAddrInHash);
  1639. }
  1640. if (!p_lh)
  1641. return NULL;
  1642. num = *p_counter;
  1643. for (i = 0; i < num; i++) {
  1644. enet_addr_cont =
  1645. (struct enet_addr_container *)
  1646. ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
  1647. for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
  1648. if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
  1649. break;
  1650. if (j == 0)
  1651. return enet_addr_cont; /* Found */
  1652. }
  1653. enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
  1654. }
  1655. return NULL;
  1656. }
  1657. static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
  1658. struct enet_addr *p_enet_addr)
  1659. {
  1660. enum ucc_geth_enet_address_recognition_location location;
  1661. struct enet_addr_container *enet_addr_cont;
  1662. struct list_head *p_lh;
  1663. u8 i;
  1664. u32 limit;
  1665. u8 *p_counter;
  1666. if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
  1667. p_lh = &ugeth->group_hash_q;
  1668. limit = ugeth->ug_info->maxGroupAddrInHash;
  1669. location =
  1670. UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
  1671. p_counter = &(ugeth->numGroupAddrInHash);
  1672. } else {
  1673. p_lh = &ugeth->ind_hash_q;
  1674. limit = ugeth->ug_info->maxIndAddrInHash;
  1675. location =
  1676. UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
  1677. p_counter = &(ugeth->numIndAddrInHash);
  1678. }
  1679. if ((enet_addr_cont =
  1680. ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
1681. list_add(&enet_addr_cont->node, p_lh); /* Put it back; list_add() takes the new entry first, then the list head */
  1682. return 0;
  1683. }
  1684. if ((!p_lh) || (!(*p_counter < limit)))
  1685. return -EBUSY;
  1686. if (!(enet_addr_cont = get_enet_addr_container()))
  1687. return -ENOMEM;
  1688. for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
  1689. (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
  1690. enet_addr_cont->location = location;
  1691. enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
  1692. ++(*p_counter);
  1693. hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
  1694. return 0;
  1695. }
  1696. static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
  1697. struct enet_addr *p_enet_addr)
  1698. {
  1699. struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
  1700. struct enet_addr_container *enet_addr_cont;
  1701. struct ucc_fast_private *uccf;
  1702. enum comm_dir comm_dir;
  1703. u16 i, num;
  1704. struct list_head *p_lh;
  1705. u32 *addr_h, *addr_l;
  1706. u8 *p_counter;
  1707. uccf = ugeth->uccf;
  1708. p_82xx_addr_filt =
  1709. (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
  1710. addressfiltering;
1711. enet_addr_cont =
1712. ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr);
1713. if (!enet_addr_cont)
  1714. return -ENOENT;
  1715. /* It's been found and removed from the CQ. */
  1716. /* Now destroy its container */
  1717. put_enet_addr_container(enet_addr_cont);
  1718. if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
  1719. addr_h = &(p_82xx_addr_filt->gaddr_h);
  1720. addr_l = &(p_82xx_addr_filt->gaddr_l);
  1721. p_lh = &ugeth->group_hash_q;
  1722. p_counter = &(ugeth->numGroupAddrInHash);
  1723. } else {
  1724. addr_h = &(p_82xx_addr_filt->iaddr_h);
  1725. addr_l = &(p_82xx_addr_filt->iaddr_l);
  1726. p_lh = &ugeth->ind_hash_q;
  1727. p_counter = &(ugeth->numIndAddrInHash);
  1728. }
  1729. comm_dir = 0;
  1730. if (uccf->enabled_tx)
  1731. comm_dir |= COMM_DIR_TX;
  1732. if (uccf->enabled_rx)
  1733. comm_dir |= COMM_DIR_RX;
  1734. if (comm_dir)
  1735. ugeth_disable(ugeth, comm_dir);
  1736. /* Clear the hash table. */
  1737. out_be32(addr_h, 0x00000000);
  1738. out_be32(addr_l, 0x00000000);
  1739. /* Add all remaining CQ elements back into hash */
  1740. num = --(*p_counter);
  1741. for (i = 0; i < num; i++) {
  1742. enet_addr_cont =
  1743. (struct enet_addr_container *)
  1744. ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
  1745. hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
  1746. enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
  1747. }
  1748. if (comm_dir)
  1749. ugeth_enable(ugeth, comm_dir);
  1750. return 0;
  1751. }
  1752. #endif /* CONFIG_UGETH_FILTERING */
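/* Clear the group or individual hash filter in the 82xx address filtering
 * PRAM and release every address container queued for that filter, pausing
 * Tx/Rx around the hardware update if they were enabled.
 */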
  1753. static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
  1754. ugeth,
  1755. enum enet_addr_type
  1756. enet_addr_type)
  1757. {
  1758. struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
  1759. struct ucc_fast_private *uccf;
  1760. enum comm_dir comm_dir;
  1761. struct list_head *p_lh;
  1762. u16 i, num;
  1763. u32 __iomem *addr_h;
  1764. u32 __iomem *addr_l;
  1765. u8 *p_counter;
  1766. uccf = ugeth->uccf;
  1767. p_82xx_addr_filt =
  1768. (struct ucc_geth_82xx_address_filtering_pram __iomem *)
  1769. ugeth->p_rx_glbl_pram->addressfiltering;
  1770. if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
  1771. addr_h = &(p_82xx_addr_filt->gaddr_h);
  1772. addr_l = &(p_82xx_addr_filt->gaddr_l);
  1773. p_lh = &ugeth->group_hash_q;
  1774. p_counter = &(ugeth->numGroupAddrInHash);
  1775. } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
  1776. addr_h = &(p_82xx_addr_filt->iaddr_h);
  1777. addr_l = &(p_82xx_addr_filt->iaddr_l);
  1778. p_lh = &ugeth->ind_hash_q;
  1779. p_counter = &(ugeth->numIndAddrInHash);
  1780. } else
  1781. return -EINVAL;
  1782. comm_dir = 0;
  1783. if (uccf->enabled_tx)
  1784. comm_dir |= COMM_DIR_TX;
  1785. if (uccf->enabled_rx)
  1786. comm_dir |= COMM_DIR_RX;
  1787. if (comm_dir)
  1788. ugeth_disable(ugeth, comm_dir);
  1789. /* Clear the hash table. */
  1790. out_be32(addr_h, 0x00000000);
  1791. out_be32(addr_l, 0x00000000);
  1792. if (!p_lh)
  1793. return 0;
  1794. num = *p_counter;
  1795. /* Delete all remaining CQ elements */
  1796. for (i = 0; i < num; i++)
  1797. put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
  1798. *p_counter = 0;
  1799. if (comm_dir)
  1800. ugeth_enable(ugeth, comm_dir);
  1801. return 0;
  1802. }
  1803. #ifdef CONFIG_UGETH_FILTERING
  1804. static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
  1805. struct enet_addr *p_enet_addr,
  1806. u8 paddr_num)
  1807. {
  1808. int i;
  1809. if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
  1810. ugeth_warn
  1811. ("%s: multicast address added to paddr will have no "
  1812. "effect - is this what you wanted?",
  1813. __func__);
  1814. ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
  1815. /* store address in our database */
  1816. for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
  1817. ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
  1818. /* put in hardware */
  1819. return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
  1820. }
  1821. #endif /* CONFIG_UGETH_FILTERING */
  1822. static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
  1823. u8 paddr_num)
  1824. {
  1825. ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
  1826. return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
  1827. }
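/* Free everything allocated by ucc_struct_init()/ucc_geth_startup(): the
 * fast UCC, all MURAM parameter RAM areas, the BD rings and their socket
 * buffers, and any queued address containers.
 */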
  1828. static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
  1829. {
  1830. u16 i, j;
  1831. u8 __iomem *bd;
  1832. if (!ugeth)
  1833. return;
  1834. if (ugeth->uccf) {
  1835. ucc_fast_free(ugeth->uccf);
  1836. ugeth->uccf = NULL;
  1837. }
  1838. if (ugeth->p_thread_data_tx) {
  1839. qe_muram_free(ugeth->thread_dat_tx_offset);
  1840. ugeth->p_thread_data_tx = NULL;
  1841. }
  1842. if (ugeth->p_thread_data_rx) {
  1843. qe_muram_free(ugeth->thread_dat_rx_offset);
  1844. ugeth->p_thread_data_rx = NULL;
  1845. }
  1846. if (ugeth->p_exf_glbl_param) {
  1847. qe_muram_free(ugeth->exf_glbl_param_offset);
  1848. ugeth->p_exf_glbl_param = NULL;
  1849. }
  1850. if (ugeth->p_rx_glbl_pram) {
  1851. qe_muram_free(ugeth->rx_glbl_pram_offset);
  1852. ugeth->p_rx_glbl_pram = NULL;
  1853. }
  1854. if (ugeth->p_tx_glbl_pram) {
  1855. qe_muram_free(ugeth->tx_glbl_pram_offset);
  1856. ugeth->p_tx_glbl_pram = NULL;
  1857. }
  1858. if (ugeth->p_send_q_mem_reg) {
  1859. qe_muram_free(ugeth->send_q_mem_reg_offset);
  1860. ugeth->p_send_q_mem_reg = NULL;
  1861. }
  1862. if (ugeth->p_scheduler) {
  1863. qe_muram_free(ugeth->scheduler_offset);
  1864. ugeth->p_scheduler = NULL;
  1865. }
  1866. if (ugeth->p_tx_fw_statistics_pram) {
  1867. qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
  1868. ugeth->p_tx_fw_statistics_pram = NULL;
  1869. }
  1870. if (ugeth->p_rx_fw_statistics_pram) {
  1871. qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
  1872. ugeth->p_rx_fw_statistics_pram = NULL;
  1873. }
  1874. if (ugeth->p_rx_irq_coalescing_tbl) {
  1875. qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
  1876. ugeth->p_rx_irq_coalescing_tbl = NULL;
  1877. }
  1878. if (ugeth->p_rx_bd_qs_tbl) {
  1879. qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
  1880. ugeth->p_rx_bd_qs_tbl = NULL;
  1881. }
  1882. if (ugeth->p_init_enet_param_shadow) {
  1883. return_init_enet_entries(ugeth,
  1884. &(ugeth->p_init_enet_param_shadow->
  1885. rxthread[0]),
  1886. ENET_INIT_PARAM_MAX_ENTRIES_RX,
  1887. ugeth->ug_info->riscRx, 1);
  1888. return_init_enet_entries(ugeth,
  1889. &(ugeth->p_init_enet_param_shadow->
  1890. txthread[0]),
  1891. ENET_INIT_PARAM_MAX_ENTRIES_TX,
  1892. ugeth->ug_info->riscTx, 0);
  1893. kfree(ugeth->p_init_enet_param_shadow);
  1894. ugeth->p_init_enet_param_shadow = NULL;
  1895. }
  1896. for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
  1897. bd = ugeth->p_tx_bd_ring[i];
  1898. if (!bd)
  1899. continue;
  1900. for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
  1901. if (ugeth->tx_skbuff[i][j]) {
  1902. dma_unmap_single(&ugeth->dev->dev,
  1903. in_be32(&((struct qe_bd __iomem *)bd)->buf),
  1904. (in_be32((u32 __iomem *)bd) &
  1905. BD_LENGTH_MASK),
  1906. DMA_TO_DEVICE);
  1907. dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
  1908. ugeth->tx_skbuff[i][j] = NULL;
  1909. }
  1910. }
  1911. kfree(ugeth->tx_skbuff[i]);
  1912. if (ugeth->p_tx_bd_ring[i]) {
  1913. if (ugeth->ug_info->uf_info.bd_mem_part ==
  1914. MEM_PART_SYSTEM)
  1915. kfree((void *)ugeth->tx_bd_ring_offset[i]);
  1916. else if (ugeth->ug_info->uf_info.bd_mem_part ==
  1917. MEM_PART_MURAM)
  1918. qe_muram_free(ugeth->tx_bd_ring_offset[i]);
  1919. ugeth->p_tx_bd_ring[i] = NULL;
  1920. }
  1921. }
  1922. for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
  1923. if (ugeth->p_rx_bd_ring[i]) {
  1924. /* Return existing data buffers in ring */
  1925. bd = ugeth->p_rx_bd_ring[i];
  1926. for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
  1927. if (ugeth->rx_skbuff[i][j]) {
  1928. dma_unmap_single(&ugeth->dev->dev,
  1929. in_be32(&((struct qe_bd __iomem *)bd)->buf),
  1930. ugeth->ug_info->
  1931. uf_info.max_rx_buf_length +
  1932. UCC_GETH_RX_DATA_BUF_ALIGNMENT,
  1933. DMA_FROM_DEVICE);
  1934. dev_kfree_skb_any(
  1935. ugeth->rx_skbuff[i][j]);
  1936. ugeth->rx_skbuff[i][j] = NULL;
  1937. }
  1938. bd += sizeof(struct qe_bd);
  1939. }
  1940. kfree(ugeth->rx_skbuff[i]);
  1941. if (ugeth->ug_info->uf_info.bd_mem_part ==
  1942. MEM_PART_SYSTEM)
  1943. kfree((void *)ugeth->rx_bd_ring_offset[i]);
  1944. else if (ugeth->ug_info->uf_info.bd_mem_part ==
  1945. MEM_PART_MURAM)
  1946. qe_muram_free(ugeth->rx_bd_ring_offset[i]);
  1947. ugeth->p_rx_bd_ring[i] = NULL;
  1948. }
  1949. }
  1950. while (!list_empty(&ugeth->group_hash_q))
  1951. put_enet_addr_container(ENET_ADDR_CONT_ENTRY
  1952. (dequeue(&ugeth->group_hash_q)));
  1953. while (!list_empty(&ugeth->ind_hash_q))
  1954. put_enet_addr_container(ENET_ADDR_CONT_ENTRY
  1955. (dequeue(&ugeth->ind_hash_q)));
  1956. }
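/* Update Rx address filtering from the net_device flags: promiscuous mode
 * via UPSMR_PRO, all-multicast by setting the group hash filter to all ones,
 * or individual multicast addresses hashed in by the CPM.
 */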
  1957. static void ucc_geth_set_multi(struct net_device *dev)
  1958. {
  1959. struct ucc_geth_private *ugeth;
  1960. struct dev_mc_list *dmi;
  1961. struct ucc_fast __iomem *uf_regs;
  1962. struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
  1963. int i;
  1964. ugeth = netdev_priv(dev);
  1965. uf_regs = ugeth->uccf->uf_regs;
  1966. if (dev->flags & IFF_PROMISC) {
  1967. out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
  1968. } else {
  1969. out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
  1970. p_82xx_addr_filt =
  1971. (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
  1972. p_rx_glbl_pram->addressfiltering;
  1973. if (dev->flags & IFF_ALLMULTI) {
  1974. /* Catch all multicast addresses, so set the
  1975. * filter to all 1's.
  1976. */
  1977. out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
  1978. out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
  1979. } else {
  1980. /* Clear filter and add the addresses in the list.
  1981. */
  1982. out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
  1983. out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
  1984. dmi = dev->mc_list;
  1985. for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
  1986. /* Only support group multicast for now.
  1987. */
  1988. if (!(dmi->dmi_addr[0] & 1))
  1989. continue;
  1990. /* Ask CPM to run CRC and set bit in
  1991. * filter mask.
  1992. */
  1993. hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
  1994. }
  1995. }
  1996. }
  1997. }
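/* Bring the controller down: disable Tx/Rx, stop the PHY, mask and clear all
 * interrupts, disable the MAC, then free the IRQ and all resources.
 */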
  1998. static void ucc_geth_stop(struct ucc_geth_private *ugeth)
  1999. {
  2000. struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
  2001. struct phy_device *phydev = ugeth->phydev;
  2002. u32 tempval;
  2003. ugeth_vdbg("%s: IN", __func__);
  2004. /* Disable the controller */
  2005. ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
  2006. /* Tell the kernel the link is down */
  2007. phy_stop(phydev);
  2008. /* Mask all interrupts */
  2009. out_be32(ugeth->uccf->p_uccm, 0x00000000);
  2010. /* Clear all interrupts */
  2011. out_be32(ugeth->uccf->p_ucce, 0xffffffff);
  2012. /* Disable Rx and Tx */
  2013. tempval = in_be32(&ug_regs->maccfg1);
  2014. tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
  2015. out_be32(&ug_regs->maccfg1, tempval);
  2016. free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
  2017. ucc_geth_memclean(ugeth);
  2018. }
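/* Validate the ucc_geth_info parameters (BD ring sizes, buffer length, queue
 * counts, priority tables), build the receive event mask, initialize the
 * fast UCC and map the UCC Ethernet register block.
 */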
  2019. static int ucc_struct_init(struct ucc_geth_private *ugeth)
  2020. {
  2021. struct ucc_geth_info *ug_info;
  2022. struct ucc_fast_info *uf_info;
  2023. int i;
  2024. ug_info = ugeth->ug_info;
  2025. uf_info = &ug_info->uf_info;
  2026. if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
  2027. (uf_info->bd_mem_part == MEM_PART_MURAM))) {
  2028. if (netif_msg_probe(ugeth))
  2029. ugeth_err("%s: Bad memory partition value.",
  2030. __func__);
  2031. return -EINVAL;
  2032. }
  2033. /* Rx BD lengths */
  2034. for (i = 0; i < ug_info->numQueuesRx; i++) {
  2035. if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
  2036. (ug_info->bdRingLenRx[i] %
  2037. UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
  2038. if (netif_msg_probe(ugeth))
  2039. ugeth_err
  2040. ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
  2041. __func__);
  2042. return -EINVAL;
  2043. }
  2044. }
  2045. /* Tx BD lengths */
  2046. for (i = 0; i < ug_info->numQueuesTx; i++) {
  2047. if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
  2048. if (netif_msg_probe(ugeth))
  2049. ugeth_err
  2050. ("%s: Tx BD ring length must be no smaller than 2.",
  2051. __func__);
  2052. return -EINVAL;
  2053. }
  2054. }
  2055. /* mrblr */
  2056. if ((uf_info->max_rx_buf_length == 0) ||
  2057. (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
  2058. if (netif_msg_probe(ugeth))
  2059. ugeth_err
  2060. ("%s: max_rx_buf_length must be non-zero multiple of 128.",
  2061. __func__);
  2062. return -EINVAL;
  2063. }
  2064. /* num Tx queues */
  2065. if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
  2066. if (netif_msg_probe(ugeth))
  2067. ugeth_err("%s: number of tx queues too large.", __func__);
  2068. return -EINVAL;
  2069. }
  2070. /* num Rx queues */
  2071. if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
  2072. if (netif_msg_probe(ugeth))
  2073. ugeth_err("%s: number of rx queues too large.", __func__);
  2074. return -EINVAL;
  2075. }
  2076. /* l2qt */
  2077. for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
  2078. if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
  2079. if (netif_msg_probe(ugeth))
  2080. ugeth_err
  2081. ("%s: VLAN priority table entry must not be"
  2082. " larger than number of Rx queues.",
  2083. __func__);
  2084. return -EINVAL;
  2085. }
  2086. }
  2087. /* l3qt */
  2088. for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
  2089. if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
  2090. if (netif_msg_probe(ugeth))
  2091. ugeth_err
  2092. ("%s: IP priority table entry must not be"
  2093. " larger than number of Rx queues.",
  2094. __func__);
  2095. return -EINVAL;
  2096. }
  2097. }
  2098. if (ug_info->cam && !ug_info->ecamptr) {
  2099. if (netif_msg_probe(ugeth))
  2100. ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
  2101. __func__);
  2102. return -EINVAL;
  2103. }
  2104. if ((ug_info->numStationAddresses !=
  2105. UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
  2106. && ug_info->rxExtendedFiltering) {
  2107. if (netif_msg_probe(ugeth))
  2108. ugeth_err("%s: Number of station addresses greater than 1 "
  2109. "not allowed in extended parsing mode.",
  2110. __func__);
  2111. return -EINVAL;
  2112. }
  2113. /* Generate uccm_mask for receive */
  2114. uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
  2115. for (i = 0; i < ug_info->numQueuesRx; i++)
  2116. uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
  2117. for (i = 0; i < ug_info->numQueuesTx; i++)
  2118. uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
  2119. /* Initialize the general fast UCC block. */
  2120. if (ucc_fast_init(uf_info, &ugeth->uccf)) {
  2121. if (netif_msg_probe(ugeth))
  2122. ugeth_err("%s: Failed to init uccf.", __func__);
  2123. ucc_geth_memclean(ugeth);
  2124. return -ENOMEM;
  2125. }
  2126. ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
  2127. return 0;
  2128. }
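/* Allocate and initialize everything the controller needs to run: Tx/Rx BD
 * rings and their skbuff arrays, the global Tx/Rx parameter RAM pages, and
 * the scheduler, statistics and per-queue structures in MURAM.
 */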
  2129. static int ucc_geth_startup(struct ucc_geth_private *ugeth)
  2130. {
  2131. struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
  2132. struct ucc_geth_init_pram __iomem *p_init_enet_pram;
  2133. struct ucc_fast_private *uccf;
  2134. struct ucc_geth_info *ug_info;
  2135. struct ucc_fast_info *uf_info;
  2136. struct ucc_fast __iomem *uf_regs;
  2137. struct ucc_geth __iomem *ug_regs;
  2138. int ret_val = -EINVAL;
  2139. u32 remoder = UCC_GETH_REMODER_INIT;
  2140. u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
  2141. u32 ifstat, i, j, size, l2qt, l3qt, length;
  2142. u16 temoder = UCC_GETH_TEMODER_INIT;
  2143. u16 test;
  2144. u8 function_code = 0;
  2145. u8 __iomem *bd;
  2146. u8 __iomem *endOfRing;
  2147. u8 numThreadsRxNumerical, numThreadsTxNumerical;
  2148. ugeth_vdbg("%s: IN", __func__);
  2149. uccf = ugeth->uccf;
  2150. ug_info = ugeth->ug_info;
  2151. uf_info = &ug_info->uf_info;
  2152. uf_regs = uccf->uf_regs;
  2153. ug_regs = ugeth->ug_regs;
  2154. switch (ug_info->numThreadsRx) {
  2155. case UCC_GETH_NUM_OF_THREADS_1:
  2156. numThreadsRxNumerical = 1;
  2157. break;
  2158. case UCC_GETH_NUM_OF_THREADS_2:
  2159. numThreadsRxNumerical = 2;
  2160. break;
  2161. case UCC_GETH_NUM_OF_THREADS_4:
  2162. numThreadsRxNumerical = 4;
  2163. break;
  2164. case UCC_GETH_NUM_OF_THREADS_6:
  2165. numThreadsRxNumerical = 6;
  2166. break;
  2167. case UCC_GETH_NUM_OF_THREADS_8:
  2168. numThreadsRxNumerical = 8;
  2169. break;
  2170. default:
  2171. if (netif_msg_ifup(ugeth))
  2172. ugeth_err("%s: Bad number of Rx threads value.",
  2173. __func__);
  2174. ucc_geth_memclean(ugeth);
  2175. return -EINVAL;
  2176. break;
  2177. }
  2178. switch (ug_info->numThreadsTx) {
  2179. case UCC_GETH_NUM_OF_THREADS_1:
  2180. numThreadsTxNumerical = 1;
  2181. break;
  2182. case UCC_GETH_NUM_OF_THREADS_2:
  2183. numThreadsTxNumerical = 2;
  2184. break;
  2185. case UCC_GETH_NUM_OF_THREADS_4:
  2186. numThreadsTxNumerical = 4;
  2187. break;
  2188. case UCC_GETH_NUM_OF_THREADS_6:
  2189. numThreadsTxNumerical = 6;
  2190. break;
  2191. case UCC_GETH_NUM_OF_THREADS_8:
  2192. numThreadsTxNumerical = 8;
  2193. break;
  2194. default:
  2195. if (netif_msg_ifup(ugeth))
  2196. ugeth_err("%s: Bad number of Tx threads value.",
  2197. __func__);
  2198. ucc_geth_memclean(ugeth);
  2199. return -EINVAL;
  2200. break;
  2201. }
  2202. /* Calculate rx_extended_features */
  2203. ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
  2204. ug_info->ipAddressAlignment ||
  2205. (ug_info->numStationAddresses !=
  2206. UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
  2207. ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
  2208. (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
  2209. || (ug_info->vlanOperationNonTagged !=
  2210. UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
  2211. init_default_reg_vals(&uf_regs->upsmr,
  2212. &ug_regs->maccfg1, &ug_regs->maccfg2);
  2213. /* Set UPSMR */
  2214. /* For more details see the hardware spec. */
  2215. init_rx_parameters(ug_info->bro,
  2216. ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
  2217. /* We're going to ignore other registers for now, */
  2218. /* except as needed to get up and running */
  2219. /* Set MACCFG1 */
  2220. /* For more details see the hardware spec. */
  2221. init_flow_control_params(ug_info->aufc,
  2222. ug_info->receiveFlowControl,
  2223. ug_info->transmitFlowControl,
  2224. ug_info->pausePeriod,
  2225. ug_info->extensionField,
  2226. &uf_regs->upsmr,
  2227. &ug_regs->uempr, &ug_regs->maccfg1);
  2228. maccfg1 = in_be32(&ug_regs->maccfg1);
  2229. maccfg1 |= MACCFG1_ENABLE_RX;
  2230. maccfg1 |= MACCFG1_ENABLE_TX;
  2231. out_be32(&ug_regs->maccfg1, maccfg1);
  2232. /* Set IPGIFG */
  2233. /* For more details see the hardware spec. */
  2234. ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
  2235. ug_info->nonBackToBackIfgPart2,
  2236. ug_info->
  2237. miminumInterFrameGapEnforcement,
  2238. ug_info->backToBackInterFrameGap,
  2239. &ug_regs->ipgifg);
  2240. if (ret_val != 0) {
  2241. if (netif_msg_ifup(ugeth))
  2242. ugeth_err("%s: IPGIFG initialization parameter too large.",
  2243. __func__);
  2244. ucc_geth_memclean(ugeth);
  2245. return ret_val;
  2246. }
  2247. /* Set HAFDUP */
  2248. /* For more details see the hardware spec. */
  2249. ret_val = init_half_duplex_params(ug_info->altBeb,
  2250. ug_info->backPressureNoBackoff,
  2251. ug_info->noBackoff,
  2252. ug_info->excessDefer,
  2253. ug_info->altBebTruncation,
  2254. ug_info->maxRetransmission,
  2255. ug_info->collisionWindow,
  2256. &ug_regs->hafdup);
  2257. if (ret_val != 0) {
  2258. if (netif_msg_ifup(ugeth))
  2259. ugeth_err("%s: Half Duplex initialization parameter too large.",
  2260. __func__);
  2261. ucc_geth_memclean(ugeth);
  2262. return ret_val;
  2263. }
  2264. /* Set IFSTAT */
  2265. /* For more details see the hardware spec. */
  2266. /* Read only - resets upon read */
  2267. ifstat = in_be32(&ug_regs->ifstat);
  2268. /* Clear UEMPR */
  2269. /* For more details see the hardware spec. */
  2270. out_be32(&ug_regs->uempr, 0);
  2271. /* Set UESCR */
  2272. /* For more details see the hardware spec. */
  2273. init_hw_statistics_gathering_mode((ug_info->statisticsMode &
  2274. UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
  2275. 0, &uf_regs->upsmr, &ug_regs->uescr);
  2276. /* Allocate Tx bds */
  2277. for (j = 0; j < ug_info->numQueuesTx; j++) {
  2278. /* Allocate in multiple of
  2279. UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
  2280. according to spec */
  2281. length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
  2282. / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
  2283. * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
  2284. if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
  2285. UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
  2286. length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
  2287. if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
  2288. u32 align = 4;
  2289. if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
  2290. align = UCC_GETH_TX_BD_RING_ALIGNMENT;
  2291. ugeth->tx_bd_ring_offset[j] =
  2292. (u32) kmalloc((u32) (length + align), GFP_KERNEL);
  2293. if (ugeth->tx_bd_ring_offset[j] != 0)
  2294. ugeth->p_tx_bd_ring[j] =
  2295. (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
  2296. align) & ~(align - 1));
  2297. } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
  2298. ugeth->tx_bd_ring_offset[j] =
  2299. qe_muram_alloc(length,
  2300. UCC_GETH_TX_BD_RING_ALIGNMENT);
  2301. if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
  2302. ugeth->p_tx_bd_ring[j] =
  2303. (u8 __iomem *) qe_muram_addr(ugeth->
  2304. tx_bd_ring_offset[j]);
  2305. }
  2306. if (!ugeth->p_tx_bd_ring[j]) {
  2307. if (netif_msg_ifup(ugeth))
  2308. ugeth_err
  2309. ("%s: Can not allocate memory for Tx bd rings.",
  2310. __func__);
  2311. ucc_geth_memclean(ugeth);
  2312. return -ENOMEM;
  2313. }
  2314. /* Zero unused end of bd ring, according to spec */
  2315. memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
  2316. ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
  2317. length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
  2318. }
  2319. /* Allocate Rx bds */
  2320. for (j = 0; j < ug_info->numQueuesRx; j++) {
  2321. length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
  2322. if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
  2323. u32 align = 4;
  2324. if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
  2325. align = UCC_GETH_RX_BD_RING_ALIGNMENT;
  2326. ugeth->rx_bd_ring_offset[j] =
  2327. (u32) kmalloc((u32) (length + align), GFP_KERNEL);
  2328. if (ugeth->rx_bd_ring_offset[j] != 0)
  2329. ugeth->p_rx_bd_ring[j] =
  2330. (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
  2331. align) & ~(align - 1));
  2332. } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
  2333. ugeth->rx_bd_ring_offset[j] =
  2334. qe_muram_alloc(length,
  2335. UCC_GETH_RX_BD_RING_ALIGNMENT);
  2336. if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
  2337. ugeth->p_rx_bd_ring[j] =
  2338. (u8 __iomem *) qe_muram_addr(ugeth->
  2339. rx_bd_ring_offset[j]);
  2340. }
  2341. if (!ugeth->p_rx_bd_ring[j]) {
  2342. if (netif_msg_ifup(ugeth))
  2343. ugeth_err
  2344. ("%s: Can not allocate memory for Rx bd rings.",
  2345. __func__);
  2346. ucc_geth_memclean(ugeth);
  2347. return -ENOMEM;
  2348. }
  2349. }
  2350. /* Init Tx bds */
  2351. for (j = 0; j < ug_info->numQueuesTx; j++) {
  2352. /* Setup the skbuff rings */
  2353. ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
  2354. ugeth->ug_info->bdRingLenTx[j],
  2355. GFP_KERNEL);
  2356. if (ugeth->tx_skbuff[j] == NULL) {
  2357. if (netif_msg_ifup(ugeth))
  2358. ugeth_err("%s: Could not allocate tx_skbuff",
  2359. __func__);
  2360. ucc_geth_memclean(ugeth);
  2361. return -ENOMEM;
  2362. }
  2363. for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
  2364. ugeth->tx_skbuff[j][i] = NULL;
  2365. ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
  2366. bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
  2367. for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
  2368. /* clear bd buffer */
  2369. out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
  2370. /* set bd status and length */
  2371. out_be32((u32 __iomem *)bd, 0);
  2372. bd += sizeof(struct qe_bd);
  2373. }
  2374. bd -= sizeof(struct qe_bd);
  2375. /* set bd status and length */
  2376. out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
  2377. }
  2378. /* Init Rx bds */
  2379. for (j = 0; j < ug_info->numQueuesRx; j++) {
  2380. /* Setup the skbuff rings */
  2381. ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
  2382. ugeth->ug_info->bdRingLenRx[j],
  2383. GFP_KERNEL);
  2384. if (ugeth->rx_skbuff[j] == NULL) {
  2385. if (netif_msg_ifup(ugeth))
  2386. ugeth_err("%s: Could not allocate rx_skbuff",
  2387. __func__);
  2388. ucc_geth_memclean(ugeth);
  2389. return -ENOMEM;
  2390. }
  2391. for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
  2392. ugeth->rx_skbuff[j][i] = NULL;
  2393. ugeth->skb_currx[j] = 0;
  2394. bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
  2395. for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
  2396. /* set bd status and length */
  2397. out_be32((u32 __iomem *)bd, R_I);
  2398. /* clear bd buffer */
  2399. out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
  2400. bd += sizeof(struct qe_bd);
  2401. }
  2402. bd -= sizeof(struct qe_bd);
  2403. /* set bd status and length */
  2404. out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
  2405. }
  2406. /*
  2407. * Global PRAM
  2408. */
  2409. /* Tx global PRAM */
  2410. /* Allocate global tx parameter RAM page */
  2411. ugeth->tx_glbl_pram_offset =
  2412. qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
  2413. UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
  2414. if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
  2415. if (netif_msg_ifup(ugeth))
  2416. ugeth_err
  2417. ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
  2418. __func__);
  2419. ucc_geth_memclean(ugeth);
  2420. return -ENOMEM;
  2421. }
  2422. ugeth->p_tx_glbl_pram =
  2423. (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
  2424. tx_glbl_pram_offset);
  2425. /* Zero out p_tx_glbl_pram */
  2426. memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
  2427. /* Fill global PRAM */
  2428. /* TQPTR */
  2429. /* Size varies with number of Tx threads */
  2430. ugeth->thread_dat_tx_offset =
  2431. qe_muram_alloc(numThreadsTxNumerical *
  2432. sizeof(struct ucc_geth_thread_data_tx) +
  2433. 32 * (numThreadsTxNumerical == 1),
  2434. UCC_GETH_THREAD_DATA_ALIGNMENT);
  2435. if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
  2436. if (netif_msg_ifup(ugeth))
  2437. ugeth_err
  2438. ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
  2439. __func__);
  2440. ucc_geth_memclean(ugeth);
  2441. return -ENOMEM;
  2442. }
  2443. ugeth->p_thread_data_tx =
  2444. (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
  2445. thread_dat_tx_offset);
  2446. out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
  2447. /* vtagtable */
  2448. for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
  2449. out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
  2450. ug_info->vtagtable[i]);
  2451. /* iphoffset */
  2452. for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
  2453. out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
  2454. ug_info->iphoffset[i]);
  2455. /* SQPTR */
  2456. /* Size varies with number of Tx queues */
  2457. ugeth->send_q_mem_reg_offset =
  2458. qe_muram_alloc(ug_info->numQueuesTx *
  2459. sizeof(struct ucc_geth_send_queue_qd),
  2460. UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
  2461. if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
  2462. if (netif_msg_ifup(ugeth))
  2463. ugeth_err
  2464. ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
  2465. __func__);
  2466. ucc_geth_memclean(ugeth);
  2467. return -ENOMEM;
  2468. }
  2469. ugeth->p_send_q_mem_reg =
  2470. (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
  2471. send_q_mem_reg_offset);
  2472. out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
  2473. /* Setup the table */
  2474. /* Assume BD rings are already established */
  2475. for (i = 0; i < ug_info->numQueuesTx; i++) {
  2476. endOfRing =
  2477. ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
  2478. 1) * sizeof(struct qe_bd);
  2479. if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
  2480. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
  2481. (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
  2482. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
  2483. last_bd_completed_address,
  2484. (u32) virt_to_phys(endOfRing));
  2485. } else if (ugeth->ug_info->uf_info.bd_mem_part ==
  2486. MEM_PART_MURAM) {
  2487. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
  2488. (u32) immrbar_virt_to_phys(ugeth->
  2489. p_tx_bd_ring[i]));
  2490. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
  2491. last_bd_completed_address,
  2492. (u32) immrbar_virt_to_phys(endOfRing));
  2493. }
  2494. }
  2495. /* schedulerbasepointer */
  2496. if (ug_info->numQueuesTx > 1) {
  2497. /* scheduler exists only if more than 1 tx queue */
  2498. ugeth->scheduler_offset =
  2499. qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
  2500. UCC_GETH_SCHEDULER_ALIGNMENT);
  2501. if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
  2502. if (netif_msg_ifup(ugeth))
  2503. ugeth_err
  2504. ("%s: Can not allocate DPRAM memory for p_scheduler.",
  2505. __func__);
  2506. ucc_geth_memclean(ugeth);
  2507. return -ENOMEM;
  2508. }
  2509. ugeth->p_scheduler =
  2510. (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
  2511. scheduler_offset);
  2512. out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
  2513. ugeth->scheduler_offset);
  2514. /* Zero out p_scheduler */
  2515. memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
  2516. /* Set values in scheduler */
  2517. out_be32(&ugeth->p_scheduler->mblinterval,
  2518. ug_info->mblinterval);
  2519. out_be16(&ugeth->p_scheduler->nortsrbytetime,
  2520. ug_info->nortsrbytetime);
  2521. out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
  2522. out_8(&ugeth->p_scheduler->strictpriorityq,
  2523. ug_info->strictpriorityq);
  2524. out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
  2525. out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
  2526. for (i = 0; i < NUM_TX_QUEUES; i++)
  2527. out_8(&ugeth->p_scheduler->weightfactor[i],
  2528. ug_info->weightfactor[i]);
  2529. /* Set pointers to cpucount registers in scheduler */
  2530. ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
  2531. ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
  2532. ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
  2533. ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
  2534. ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
  2535. ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
  2536. ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
  2537. ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
  2538. }
  2539. /* schedulerbasepointer */
  2540. /* TxRMON_PTR (statistics) */
  2541. if (ug_info->
  2542. statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
  2543. ugeth->tx_fw_statistics_pram_offset =
  2544. qe_muram_alloc(sizeof
  2545. (struct ucc_geth_tx_firmware_statistics_pram),
  2546. UCC_GETH_TX_STATISTICS_ALIGNMENT);
  2547. if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
  2548. if (netif_msg_ifup(ugeth))
  2549. ugeth_err
  2550. ("%s: Can not allocate DPRAM memory for"
  2551. " p_tx_fw_statistics_pram.",
  2552. __func__);
  2553. ucc_geth_memclean(ugeth);
  2554. return -ENOMEM;
  2555. }
  2556. ugeth->p_tx_fw_statistics_pram =
  2557. (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
  2558. qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
  2559. /* Zero out p_tx_fw_statistics_pram */
  2560. memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
  2561. 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
  2562. }
  2563. /* temoder */
  2564. /* Already has speed set */
  2565. if (ug_info->numQueuesTx > 1)
  2566. temoder |= TEMODER_SCHEDULER_ENABLE;
  2567. if (ug_info->ipCheckSumGenerate)
  2568. temoder |= TEMODER_IP_CHECKSUM_GENERATE;
  2569. temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
  2570. out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
  2571. test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
  2572. /* Function code register value to be used later */
  2573. function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
  2574. /* Required for QE */
  2575. /* function code register */
  2576. out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
  2577. /* Rx global PRAM */
  2578. /* Allocate global rx parameter RAM page */
  2579. ugeth->rx_glbl_pram_offset =
  2580. qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
  2581. UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
  2582. if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
  2583. if (netif_msg_ifup(ugeth))
  2584. ugeth_err
  2585. ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
  2586. __func__);
  2587. ucc_geth_memclean(ugeth);
  2588. return -ENOMEM;
  2589. }
  2590. ugeth->p_rx_glbl_pram =
  2591. (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
  2592. rx_glbl_pram_offset);
  2593. /* Zero out p_rx_glbl_pram */
  2594. memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
  2595. /* Fill global PRAM */
  2596. /* RQPTR */
  2597. /* Size varies with number of Rx threads */
  2598. ugeth->thread_dat_rx_offset =
  2599. qe_muram_alloc(numThreadsRxNumerical *
  2600. sizeof(struct ucc_geth_thread_data_rx),
  2601. UCC_GETH_THREAD_DATA_ALIGNMENT);
  2602. if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
  2603. if (netif_msg_ifup(ugeth))
  2604. ugeth_err
  2605. ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
  2606. __func__);
  2607. ucc_geth_memclean(ugeth);
  2608. return -ENOMEM;
  2609. }
  2610. ugeth->p_thread_data_rx =
  2611. (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
  2612. thread_dat_rx_offset);
  2613. out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
  2614. /* typeorlen */
  2615. out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
  2616. /* rxrmonbaseptr (statistics) */
  2617. if (ug_info->
  2618. statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
  2619. ugeth->rx_fw_statistics_pram_offset =
  2620. qe_muram_alloc(sizeof
  2621. (struct ucc_geth_rx_firmware_statistics_pram),
  2622. UCC_GETH_RX_STATISTICS_ALIGNMENT);
  2623. if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
  2624. if (netif_msg_ifup(ugeth))
  2625. ugeth_err
  2626. ("%s: Can not allocate DPRAM memory for"
  2627. " p_rx_fw_statistics_pram.", __func__);
  2628. ucc_geth_memclean(ugeth);
  2629. return -ENOMEM;
  2630. }
  2631. ugeth->p_rx_fw_statistics_pram =
  2632. (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
  2633. qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
  2634. /* Zero out p_rx_fw_statistics_pram */
  2635. memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
  2636. sizeof(struct ucc_geth_rx_firmware_statistics_pram));
  2637. }
  2638. /* intCoalescingPtr */
  2639. /* Size varies with number of Rx queues */
  2640. ugeth->rx_irq_coalescing_tbl_offset =
  2641. qe_muram_alloc(ug_info->numQueuesRx *
  2642. sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
  2643. + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
  2644. if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
  2645. if (netif_msg_ifup(ugeth))
  2646. ugeth_err
  2647. ("%s: Can not allocate DPRAM memory for"
  2648. " p_rx_irq_coalescing_tbl.", __func__);
  2649. ucc_geth_memclean(ugeth);
  2650. return -ENOMEM;
  2651. }
  2652. ugeth->p_rx_irq_coalescing_tbl =
  2653. (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
  2654. qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
  2655. out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
  2656. ugeth->rx_irq_coalescing_tbl_offset);
  2657. /* Fill interrupt coalescing table */
  2658. for (i = 0; i < ug_info->numQueuesRx; i++) {
  2659. out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
  2660. interruptcoalescingmaxvalue,
  2661. ug_info->interruptcoalescingmaxvalue[i]);
  2662. out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
  2663. interruptcoalescingcounter,
  2664. ug_info->interruptcoalescingmaxvalue[i]);
  2665. }
  2666. /* MRBLR */
  2667. init_max_rx_buff_len(uf_info->max_rx_buf_length,
  2668. &ugeth->p_rx_glbl_pram->mrblr);
  2669. /* MFLR */
  2670. out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
  2671. /* MINFLR */
  2672. init_min_frame_len(ug_info->minFrameLength,
  2673. &ugeth->p_rx_glbl_pram->minflr,
  2674. &ugeth->p_rx_glbl_pram->mrblr);
  2675. /* MAXD1 */
  2676. out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
  2677. /* MAXD2 */
  2678. out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
  2679. /* l2qt */
  2680. l2qt = 0;
  2681. for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
  2682. l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
  2683. out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
  2684. /* l3qt */
  2685. for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
  2686. l3qt = 0;
  2687. for (i = 0; i < 8; i++)
  2688. l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
  2689. out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
  2690. }
  2691. /* vlantype */
  2692. out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
  2693. /* vlantci */
  2694. out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
  2695. /* ecamptr */
  2696. out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
  2697. /* RBDQPTR */
  2698. /* Size varies with number of Rx queues */
  2699. ugeth->rx_bd_qs_tbl_offset =
  2700. qe_muram_alloc(ug_info->numQueuesRx *
  2701. (sizeof(struct ucc_geth_rx_bd_queues_entry) +
  2702. sizeof(struct ucc_geth_rx_prefetched_bds)),
  2703. UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
  2704. if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
  2705. if (netif_msg_ifup(ugeth))
  2706. ugeth_err
  2707. ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
  2708. __func__);
  2709. ucc_geth_memclean(ugeth);
  2710. return -ENOMEM;
  2711. }
  2712. ugeth->p_rx_bd_qs_tbl =
  2713. (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
  2714. rx_bd_qs_tbl_offset);
  2715. out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
  2716. /* Zero out p_rx_bd_qs_tbl */
  2717. memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
  2718. 0,
  2719. ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
  2720. sizeof(struct ucc_geth_rx_prefetched_bds)));
  2721. /* Setup the table */
  2722. /* Assume BD rings are already established */
  2723. for (i = 0; i < ug_info->numQueuesRx; i++) {
  2724. if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
  2725. out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
  2726. (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
  2727. } else if (ugeth->ug_info->uf_info.bd_mem_part ==
  2728. MEM_PART_MURAM) {
  2729. out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
  2730. (u32) immrbar_virt_to_phys(ugeth->
  2731. p_rx_bd_ring[i]));
  2732. }
  2733. /* rest of fields handled by QE */
  2734. }
  2735. /* remoder */
  2736. /* Already has speed set */
  2737. if (ugeth->rx_extended_features)
  2738. remoder |= REMODER_RX_EXTENDED_FEATURES;
  2739. if (ug_info->rxExtendedFiltering)
  2740. remoder |= REMODER_RX_EXTENDED_FILTERING;
  2741. if (ug_info->dynamicMaxFrameLength)
  2742. remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
  2743. if (ug_info->dynamicMinFrameLength)
  2744. remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
  2745. remoder |=
  2746. ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
  2747. remoder |=
  2748. ug_info->
  2749. vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
  2750. remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
  2751. remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
  2752. if (ug_info->ipCheckSumCheck)
  2753. remoder |= REMODER_IP_CHECKSUM_CHECK;
  2754. if (ug_info->ipAddressAlignment)
  2755. remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
  2756. out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
  2757. /* Note that this function must be called */
  2758. /* ONLY AFTER p_tx_fw_statistics_pram */
2759. /* and p_rx_fw_statistics_pram are allocated ! */
  2760. init_firmware_statistics_gathering_mode((ug_info->
  2761. statisticsMode &
  2762. UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
  2763. (ug_info->statisticsMode &
  2764. UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
  2765. &ugeth->p_tx_glbl_pram->txrmonbaseptr,
  2766. ugeth->tx_fw_statistics_pram_offset,
  2767. &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
  2768. ugeth->rx_fw_statistics_pram_offset,
  2769. &ugeth->p_tx_glbl_pram->temoder,
  2770. &ugeth->p_rx_glbl_pram->remoder);
  2771. /* function code register */
  2772. out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
  2773. /* initialize extended filtering */
  2774. if (ug_info->rxExtendedFiltering) {
  2775. if (!ug_info->extendedFilteringChainPointer) {
  2776. if (netif_msg_ifup(ugeth))
  2777. ugeth_err("%s: Null Extended Filtering Chain Pointer.",
  2778. __func__);
  2779. ucc_geth_memclean(ugeth);
  2780. return -EINVAL;
  2781. }
  2782. /* Allocate memory for extended filtering Mode Global
  2783. Parameters */
  2784. ugeth->exf_glbl_param_offset =
  2785. qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
  2786. UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
  2787. if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
  2788. if (netif_msg_ifup(ugeth))
  2789. ugeth_err
  2790. ("%s: Can not allocate DPRAM memory for"
  2791. " p_exf_glbl_param.", __func__);
  2792. ucc_geth_memclean(ugeth);
  2793. return -ENOMEM;
  2794. }
  2795. ugeth->p_exf_glbl_param =
  2796. (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
  2797. exf_glbl_param_offset);
  2798. out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
  2799. ugeth->exf_glbl_param_offset);
  2800. out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
  2801. (u32) ug_info->extendedFilteringChainPointer);
  2802. } else { /* initialize 82xx style address filtering */
  2803. /* Init individual address recognition registers to disabled */
  2804. for (j = 0; j < NUM_OF_PADDRS; j++)
  2805. ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
  2806. p_82xx_addr_filt =
  2807. (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
  2808. p_rx_glbl_pram->addressfiltering;
  2809. ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
  2810. ENET_ADDR_TYPE_GROUP);
  2811. ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
  2812. ENET_ADDR_TYPE_INDIVIDUAL);
  2813. }
  2814. /*
  2815. * Initialize UCC at QE level
  2816. */
  2817. command = QE_INIT_TX_RX;
  2818. /* Allocate shadow InitEnet command parameter structure.
  2819. * This is needed because after the InitEnet command is executed,
  2820. * the structure in DPRAM is released, because DPRAM is a premium
  2821. * resource.
  2822. * This shadow structure keeps a copy of what was done so that the
  2823. * allocated resources can be released when the channel is freed.
  2824. */
  2825. if (!(ugeth->p_init_enet_param_shadow =
  2826. kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
  2827. if (netif_msg_ifup(ugeth))
  2828. ugeth_err
  2829. ("%s: Can not allocate memory for"
  2830. " p_UccInitEnetParamShadows.", __func__);
  2831. ucc_geth_memclean(ugeth);
  2832. return -ENOMEM;
  2833. }
  2834. /* Zero out *p_init_enet_param_shadow */
  2835. memset((char *)ugeth->p_init_enet_param_shadow,
  2836. 0, sizeof(struct ucc_geth_init_pram));
  2837. /* Fill shadow InitEnet command parameter structure */
  2838. ugeth->p_init_enet_param_shadow->resinit1 =
  2839. ENET_INIT_PARAM_MAGIC_RES_INIT1;
  2840. ugeth->p_init_enet_param_shadow->resinit2 =
  2841. ENET_INIT_PARAM_MAGIC_RES_INIT2;
  2842. ugeth->p_init_enet_param_shadow->resinit3 =
  2843. ENET_INIT_PARAM_MAGIC_RES_INIT3;
  2844. ugeth->p_init_enet_param_shadow->resinit4 =
  2845. ENET_INIT_PARAM_MAGIC_RES_INIT4;
  2846. ugeth->p_init_enet_param_shadow->resinit5 =
  2847. ENET_INIT_PARAM_MAGIC_RES_INIT5;
  2848. ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
  2849. ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
  2850. ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
  2851. ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
  2852. ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
  2853. ugeth->rx_glbl_pram_offset | ug_info->riscRx;
  2854. if ((ug_info->largestexternallookupkeysize !=
  2855. QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
  2856. && (ug_info->largestexternallookupkeysize !=
  2857. QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
  2858. && (ug_info->largestexternallookupkeysize !=
  2859. QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
  2860. if (netif_msg_ifup(ugeth))
  2861. ugeth_err("%s: Invalid largest External Lookup Key Size.",
  2862. __func__);
  2863. ucc_geth_memclean(ugeth);
  2864. return -EINVAL;
  2865. }
  2866. ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
  2867. ug_info->largestexternallookupkeysize;
  2868. size = sizeof(struct ucc_geth_thread_rx_pram);
  2869. if (ug_info->rxExtendedFiltering) {
  2870. size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
  2871. if (ug_info->largestexternallookupkeysize ==
  2872. QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
  2873. size +=
  2874. THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
  2875. if (ug_info->largestexternallookupkeysize ==
  2876. QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
  2877. size +=
  2878. THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
  2879. }
  2880. if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
  2881. p_init_enet_param_shadow->rxthread[0]),
  2882. (u8) (numThreadsRxNumerical + 1)
  2883. /* Rx needs one extra for terminator */
  2884. , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
  2885. ug_info->riscRx, 1)) != 0) {
  2886. if (netif_msg_ifup(ugeth))
  2887. ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
  2888. __func__);
  2889. ucc_geth_memclean(ugeth);
  2890. return ret_val;
  2891. }
  2892. ugeth->p_init_enet_param_shadow->txglobal =
  2893. ugeth->tx_glbl_pram_offset | ug_info->riscTx;
  2894. if ((ret_val =
  2895. fill_init_enet_entries(ugeth,
  2896. &(ugeth->p_init_enet_param_shadow->
  2897. txthread[0]), numThreadsTxNumerical,
  2898. sizeof(struct ucc_geth_thread_tx_pram),
  2899. UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
  2900. ug_info->riscTx, 0)) != 0) {
  2901. if (netif_msg_ifup(ugeth))
  2902. ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
  2903. __func__);
  2904. ucc_geth_memclean(ugeth);
  2905. return ret_val;
  2906. }
  2907. /* Load Rx bds with buffers */
  2908. for (i = 0; i < ug_info->numQueuesRx; i++) {
  2909. if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
  2910. if (netif_msg_ifup(ugeth))
  2911. ugeth_err("%s: Can not fill Rx bds with buffers.",
  2912. __func__);
  2913. ucc_geth_memclean(ugeth);
  2914. return ret_val;
  2915. }
  2916. }
  2917. /* Allocate InitEnet command parameter structure */
  2918. init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
  2919. if (IS_ERR_VALUE(init_enet_pram_offset)) {
  2920. if (netif_msg_ifup(ugeth))
  2921. ugeth_err
  2922. ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
  2923. __func__);
  2924. ucc_geth_memclean(ugeth);
  2925. return -ENOMEM;
  2926. }
  2927. p_init_enet_pram =
  2928. (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
  2929. /* Copy shadow InitEnet command parameter structure into PRAM */
  2930. out_8(&p_init_enet_pram->resinit1,
  2931. ugeth->p_init_enet_param_shadow->resinit1);
  2932. out_8(&p_init_enet_pram->resinit2,
  2933. ugeth->p_init_enet_param_shadow->resinit2);
  2934. out_8(&p_init_enet_pram->resinit3,
  2935. ugeth->p_init_enet_param_shadow->resinit3);
  2936. out_8(&p_init_enet_pram->resinit4,
  2937. ugeth->p_init_enet_param_shadow->resinit4);
  2938. out_be16(&p_init_enet_pram->resinit5,
  2939. ugeth->p_init_enet_param_shadow->resinit5);
  2940. out_8(&p_init_enet_pram->largestexternallookupkeysize,
  2941. ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
  2942. out_be32(&p_init_enet_pram->rgftgfrxglobal,
  2943. ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
  2944. for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
  2945. out_be32(&p_init_enet_pram->rxthread[i],
  2946. ugeth->p_init_enet_param_shadow->rxthread[i]);
  2947. out_be32(&p_init_enet_pram->txglobal,
  2948. ugeth->p_init_enet_param_shadow->txglobal);
  2949. for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
  2950. out_be32(&p_init_enet_pram->txthread[i],
  2951. ugeth->p_init_enet_param_shadow->txthread[i]);
  2952. /* Issue QE command */
  2953. cecr_subblock =
  2954. ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
  2955. qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
  2956. init_enet_pram_offset);
  2957. /* Free InitEnet command parameter */
  2958. qe_muram_free(init_enet_pram_offset);
  2959. return 0;
  2960. }
  2961. /* ucc_geth_timeout gets called when a packet has not been
  2962. * transmitted after a set amount of time.
  2963. * For now, assume that clearing out all the structures, and
  2964. * starting over will fix the problem. */
  2965. static void ucc_geth_timeout(struct net_device *dev)
  2966. {
  2967. struct ucc_geth_private *ugeth = netdev_priv(dev);
  2968. ugeth_vdbg("%s: IN", __func__);
  2969. dev->stats.tx_errors++;
  2970. ugeth_dump_regs(ugeth);
  2971. if (dev->flags & IFF_UP) {
  2972. ucc_geth_stop(ugeth);
  2973. ucc_geth_startup(ugeth);
  2974. }
  2975. netif_tx_schedule_all(dev);
  2976. }
  2977. /* This is called by the kernel when a frame is ready for transmission. */
  2978. /* It is pointed to by the dev->hard_start_xmit function pointer */
  2979. static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
  2980. {
  2981. struct ucc_geth_private *ugeth = netdev_priv(dev);
  2982. #ifdef CONFIG_UGETH_TX_ON_DEMAND
  2983. struct ucc_fast_private *uccf;
  2984. #endif
  2985. u8 __iomem *bd; /* BD pointer */
  2986. u32 bd_status;
  2987. u8 txQ = 0;
  2988. ugeth_vdbg("%s: IN", __func__);
  2989. spin_lock_irq(&ugeth->lock);
  2990. dev->stats.tx_bytes += skb->len;
  2991. /* Start from the next BD that should be filled */
  2992. bd = ugeth->txBd[txQ];
  2993. bd_status = in_be32((u32 __iomem *)bd);
  2994. /* Save the skb pointer so we can free it later */
  2995. ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
  2996. /* Update the current skb pointer (wrapping if this was the last) */
  2997. ugeth->skb_curtx[txQ] =
  2998. (ugeth->skb_curtx[txQ] +
  2999. 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
  3000. /* set up the buffer descriptor */
  3001. out_be32(&((struct qe_bd __iomem *)bd)->buf,
  3002. dma_map_single(&ugeth->dev->dev, skb->data,
  3003. skb->len, DMA_TO_DEVICE));
  3004. /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
  3005. bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
  3006. /* set bd status and length */
  3007. out_be32((u32 __iomem *)bd, bd_status);
  3008. dev->trans_start = jiffies;
  3009. /* Move to next BD in the ring */
  3010. if (!(bd_status & T_W))
  3011. bd += sizeof(struct qe_bd);
  3012. else
  3013. bd = ugeth->p_tx_bd_ring[txQ];
  3014. /* If the next BD still needs to be cleaned up, then the bds
  3015. are full. We need to tell the kernel to stop sending us stuff. */
  3016. if (bd == ugeth->confBd[txQ]) {
  3017. if (!netif_queue_stopped(dev))
  3018. netif_stop_queue(dev);
  3019. }
  3020. ugeth->txBd[txQ] = bd;
  3021. if (ugeth->p_scheduler) {
  3022. ugeth->cpucount[txQ]++;
  3023. /* Indicate to QE that there are more Tx bds ready for
  3024. transmission */
  3025. /* This is done by writing a running counter of the bd
  3026. count to the scheduler PRAM. */
  3027. out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
  3028. }
  3029. #ifdef CONFIG_UGETH_TX_ON_DEMAND
  3030. uccf = ugeth->uccf;
  3031. out_be16(uccf->p_utodr, UCC_FAST_TOD);
  3032. #endif
  3033. spin_unlock_irq(&ugeth->lock);
  3034. return 0;
  3035. }
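/* Receive processing for one Rx queue: walk the BD ring from rxBd[rxQ],
 * hand completed, error-free frames up the stack, drop errored ones,
 * refill each BD with a fresh skb, and stop at an empty BD or when the
 * work limit is exhausted. Returns the number of frames processed. */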
  3036. static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
  3037. {
  3038. struct sk_buff *skb;
  3039. u8 __iomem *bd;
  3040. u16 length, howmany = 0;
  3041. u32 bd_status;
  3042. u8 *bdBuffer;
  3043. struct net_device *dev;
  3044. ugeth_vdbg("%s: IN", __func__);
  3045. dev = ugeth->dev;
  3046. /* collect received buffers */
  3047. bd = ugeth->rxBd[rxQ];
  3048. bd_status = in_be32((u32 __iomem *)bd);
  3049. /* while there are received buffers and BD is full (~R_E) */
  3050. while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
  3051. bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
  3052. length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
  3053. skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
  3054. /* determine whether buffer is first, last, first and last
  3055. (single buffer frame) or middle (not first and not last) */
  3056. if (!skb ||
  3057. (!(bd_status & (R_F | R_L))) ||
  3058. (bd_status & R_ERRORS_FATAL)) {
  3059. if (netif_msg_rx_err(ugeth))
  3060. ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
  3061. __func__, __LINE__, (u32) skb);
  3062. if (skb)
  3063. dev_kfree_skb_any(skb);
  3064. ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
  3065. dev->stats.rx_dropped++;
  3066. } else {
  3067. dev->stats.rx_packets++;
  3068. howmany++;
  3069. /* Prep the skb for the packet */
  3070. skb_put(skb, length);
  3071. /* Tell the skb what kind of packet this is */
  3072. skb->protocol = eth_type_trans(skb, ugeth->dev);
  3073. dev->stats.rx_bytes += length;
  3074. /* Send the packet up the stack */
  3075. netif_receive_skb(skb);
  3076. }
  3077. ugeth->dev->last_rx = jiffies;
  3078. skb = get_new_skb(ugeth, bd);
  3079. if (!skb) {
  3080. if (netif_msg_rx_err(ugeth))
  3081. ugeth_warn("%s: No Rx Data Buffer", __func__);
  3082. dev->stats.rx_dropped++;
  3083. break;
  3084. }
  3085. ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
  3086. /* update to point at the next skb */
  3087. ugeth->skb_currx[rxQ] =
  3088. (ugeth->skb_currx[rxQ] +
  3089. 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
  3090. if (bd_status & R_W)
  3091. bd = ugeth->p_rx_bd_ring[rxQ];
  3092. else
  3093. bd += sizeof(struct qe_bd);
  3094. bd_status = in_be32((u32 __iomem *)bd);
  3095. }
  3096. ugeth->rxBd[rxQ] = bd;
  3097. return howmany;
  3098. }
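/* Tx completion for one queue: reclaim BDs the QE has finished with,
 * free the associated skbs, and wake the netif queue if it was stopped. */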
  3099. static int ucc_geth_tx(struct net_device *dev, u8 txQ)
  3100. {
  3101. /* Start from the next BD that should be filled */
  3102. struct ucc_geth_private *ugeth = netdev_priv(dev);
  3103. u8 __iomem *bd; /* BD pointer */
  3104. u32 bd_status;
  3105. bd = ugeth->confBd[txQ];
  3106. bd_status = in_be32((u32 __iomem *)bd);
  3107. /* Normal processing. */
  3108. while ((bd_status & T_R) == 0) {
  3109. /* BD contains already transmitted buffer. */
  3110. /* Handle the transmitted buffer and release */
  3111. /* the BD to be used with the current frame */
  3112. if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
  3113. break;
  3114. dev->stats.tx_packets++;
  3115. /* Free the sk buffer associated with this TxBD */
  3116. dev_kfree_skb_irq(ugeth->
  3117. tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
  3118. ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
  3119. ugeth->skb_dirtytx[txQ] =
  3120. (ugeth->skb_dirtytx[txQ] +
  3121. 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
  3122. /* We freed a buffer, so now we can restart transmission */
  3123. if (netif_queue_stopped(dev))
  3124. netif_wake_queue(dev);
  3125. /* Advance the confirmation BD pointer */
  3126. if (!(bd_status & T_W))
  3127. bd += sizeof(struct qe_bd);
  3128. else
  3129. bd = ugeth->p_tx_bd_ring[txQ];
  3130. bd_status = in_be32((u32 __iomem *)bd);
  3131. }
  3132. ugeth->confBd[txQ] = bd;
  3133. return 0;
  3134. }
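/* NAPI poll routine: service every Rx queue within the given budget and,
 * once all pending work is done, re-enable the Rx event interrupts. */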
  3135. static int ucc_geth_poll(struct napi_struct *napi, int budget)
  3136. {
  3137. struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
  3138. struct net_device *dev = ugeth->dev;
  3139. struct ucc_geth_info *ug_info;
  3140. int howmany, i;
  3141. ug_info = ugeth->ug_info;
  3142. howmany = 0;
  3143. for (i = 0; i < ug_info->numQueuesRx; i++)
  3144. howmany += ucc_geth_rx(ugeth, i, budget - howmany);
  3145. if (howmany < budget) {
  3146. struct ucc_fast_private *uccf;
  3147. u32 uccm;
  3148. netif_rx_complete(dev, napi);
  3149. uccf = ugeth->uccf;
  3150. uccm = in_be32(uccf->p_uccm);
  3151. uccm |= UCCE_RX_EVENTS;
  3152. out_be32(uccf->p_uccm, uccm);
  3153. }
  3154. return howmany;
  3155. }
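/* Main interrupt handler: read and acknowledge the masked UCC events,
 * schedule NAPI for Rx events, run Tx completion for each queue whose
 * TXB event is set, and count BSY/TXE conditions as errors. */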
  3156. static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
  3157. {
  3158. struct net_device *dev = info;
  3159. struct ucc_geth_private *ugeth = netdev_priv(dev);
  3160. struct ucc_fast_private *uccf;
  3161. struct ucc_geth_info *ug_info;
  3162. register u32 ucce;
  3163. register u32 uccm;
  3164. register u32 tx_mask;
  3165. u8 i;
  3166. ugeth_vdbg("%s: IN", __func__);
  3167. uccf = ugeth->uccf;
  3168. ug_info = ugeth->ug_info;
  3169. /* read and clear events */
  3170. ucce = (u32) in_be32(uccf->p_ucce);
  3171. uccm = (u32) in_be32(uccf->p_uccm);
  3172. ucce &= uccm;
  3173. out_be32(uccf->p_ucce, ucce);
  3174. /* check for receive events that require processing */
  3175. if (ucce & UCCE_RX_EVENTS) {
  3176. if (netif_rx_schedule_prep(dev, &ugeth->napi)) {
  3177. uccm &= ~UCCE_RX_EVENTS;
  3178. out_be32(uccf->p_uccm, uccm);
  3179. __netif_rx_schedule(dev, &ugeth->napi);
  3180. }
  3181. }
  3182. /* Tx event processing */
  3183. if (ucce & UCCE_TX_EVENTS) {
  3184. spin_lock(&ugeth->lock);
  3185. tx_mask = UCCE_TXBF_SINGLE_MASK;
  3186. for (i = 0; i < ug_info->numQueuesTx; i++) {
  3187. if (ucce & tx_mask)
  3188. ucc_geth_tx(dev, i);
  3189. ucce &= ~tx_mask;
  3190. tx_mask <<= 1;
  3191. }
  3192. spin_unlock(&ugeth->lock);
  3193. }
  3194. /* Errors and other events */
  3195. if (ucce & UCCE_OTHER) {
  3196. if (ucce & UCCE_BSY) {
  3197. dev->stats.rx_errors++;
  3198. }
  3199. if (ucce & UCCE_TXE) {
  3200. dev->stats.tx_errors++;
  3201. }
  3202. }
  3203. return IRQ_HANDLED;
  3204. }
  3205. #ifdef CONFIG_NET_POLL_CONTROLLER
  3206. /*
  3207. * Polling 'interrupt' - used by things like netconsole to send skbs
  3208. * without having to re-enable interrupts. It's not called while
  3209. * the interrupt routine is executing.
  3210. */
  3211. static void ucc_netpoll(struct net_device *dev)
  3212. {
  3213. struct ucc_geth_private *ugeth = netdev_priv(dev);
  3214. int irq = ugeth->ug_info->uf_info.irq;
  3215. disable_irq(irq);
  3216. ucc_geth_irq_handler(irq, dev);
  3217. enable_irq(irq);
  3218. }
  3219. #endif /* CONFIG_NET_POLL_CONTROLLER */
  3220. /* Called when something needs to use the ethernet device */
  3221. /* Returns 0 for success. */
  3222. static int ucc_geth_open(struct net_device *dev)
  3223. {
  3224. struct ucc_geth_private *ugeth = netdev_priv(dev);
  3225. int err;
  3226. ugeth_vdbg("%s: IN", __func__);
  3227. /* Test station address */
  3228. if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
  3229. if (netif_msg_ifup(ugeth))
  3230. ugeth_err("%s: Multicast address used for station address"
  3231. " - is this what you wanted?", __func__);
  3232. return -EINVAL;
  3233. }
  3234. err = ucc_struct_init(ugeth);
  3235. if (err) {
  3236. if (netif_msg_ifup(ugeth))
  3237. ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
  3238. return err;
  3239. }
  3240. napi_enable(&ugeth->napi);
  3241. err = ucc_geth_startup(ugeth);
  3242. if (err) {
  3243. if (netif_msg_ifup(ugeth))
  3244. ugeth_err("%s: Cannot configure net device, aborting.",
  3245. dev->name);
  3246. goto out_err;
  3247. }
  3248. err = adjust_enet_interface(ugeth);
  3249. if (err) {
  3250. if (netif_msg_ifup(ugeth))
  3251. ugeth_err("%s: Cannot configure net device, aborting.",
  3252. dev->name);
  3253. goto out_err;
  3254. }
  3255. /* Set MACSTNADDR1, MACSTNADDR2 */
  3256. /* For more details see the hardware spec. */
  3257. init_mac_station_addr_regs(dev->dev_addr[0],
  3258. dev->dev_addr[1],
  3259. dev->dev_addr[2],
  3260. dev->dev_addr[3],
  3261. dev->dev_addr[4],
  3262. dev->dev_addr[5],
  3263. &ugeth->ug_regs->macstnaddr1,
  3264. &ugeth->ug_regs->macstnaddr2);
  3265. err = init_phy(dev);
  3266. if (err) {
  3267. if (netif_msg_ifup(ugeth))
  3268. ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
  3269. goto out_err;
  3270. }
  3271. phy_start(ugeth->phydev);
  3272. err =
  3273. request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
  3274. "UCC Geth", dev);
  3275. if (err) {
  3276. if (netif_msg_ifup(ugeth))
  3277. ugeth_err("%s: Cannot get IRQ for net device, aborting.",
  3278. dev->name);
  3279. ucc_geth_stop(ugeth);
  3280. goto out_err;
  3281. }
  3282. err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
  3283. if (err) {
  3284. if (netif_msg_ifup(ugeth))
  3285. ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
  3286. ucc_geth_stop(ugeth);
  3287. goto out_err;
  3288. }
  3289. netif_start_queue(dev);
  3290. return err;
  3291. out_err:
  3292. napi_disable(&ugeth->napi);
  3293. return err;
  3294. }
  3295. /* Stops the kernel queue, and halts the controller */
  3296. static int ucc_geth_close(struct net_device *dev)
  3297. {
  3298. struct ucc_geth_private *ugeth = netdev_priv(dev);
  3299. ugeth_vdbg("%s: IN", __func__);
  3300. napi_disable(&ugeth->napi);
  3301. ucc_geth_stop(ugeth);
  3302. phy_disconnect(ugeth->phydev);
  3303. ugeth->phydev = NULL;
  3304. netif_stop_queue(dev);
  3305. return 0;
  3306. }
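/* Translate the "phy-connection-type" device tree string into a
 * phy_interface_t value, falling back to MII if it is not recognized. */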
  3307. static phy_interface_t to_phy_interface(const char *phy_connection_type)
  3308. {
  3309. if (strcasecmp(phy_connection_type, "mii") == 0)
  3310. return PHY_INTERFACE_MODE_MII;
  3311. if (strcasecmp(phy_connection_type, "gmii") == 0)
  3312. return PHY_INTERFACE_MODE_GMII;
  3313. if (strcasecmp(phy_connection_type, "tbi") == 0)
  3314. return PHY_INTERFACE_MODE_TBI;
  3315. if (strcasecmp(phy_connection_type, "rmii") == 0)
  3316. return PHY_INTERFACE_MODE_RMII;
  3317. if (strcasecmp(phy_connection_type, "rgmii") == 0)
  3318. return PHY_INTERFACE_MODE_RGMII;
  3319. if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
  3320. return PHY_INTERFACE_MODE_RGMII_ID;
  3321. if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
  3322. return PHY_INTERFACE_MODE_RGMII_TXID;
  3323. if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
  3324. return PHY_INTERFACE_MODE_RGMII_RXID;
  3325. if (strcasecmp(phy_connection_type, "rtbi") == 0)
  3326. return PHY_INTERFACE_MODE_RTBI;
  3327. return PHY_INTERFACE_MODE_MII;
  3328. }
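/* Probe an OF "ucc_geth" node: read the UCC number, Rx/Tx clocks,
 * register resource, IRQ, PHY handle (or fixed-link) and connection type,
 * size the FIFOs and thread counts for the resulting link speed, then
 * allocate, initialize and register the net_device. */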
  3329. static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
  3330. {
  3331. struct device *device = &ofdev->dev;
  3332. struct device_node *np = ofdev->node;
  3333. struct device_node *mdio;
  3334. struct net_device *dev = NULL;
  3335. struct ucc_geth_private *ugeth = NULL;
  3336. struct ucc_geth_info *ug_info;
  3337. struct resource res;
  3338. struct device_node *phy;
  3339. int err, ucc_num, max_speed = 0;
  3340. const phandle *ph;
  3341. const u32 *fixed_link;
  3342. const unsigned int *prop;
  3343. const char *sprop;
  3344. const void *mac_addr;
  3345. phy_interface_t phy_interface;
  3346. static const int enet_to_speed[] = {
  3347. SPEED_10, SPEED_10, SPEED_10,
  3348. SPEED_100, SPEED_100, SPEED_100,
  3349. SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
  3350. };
  3351. static const phy_interface_t enet_to_phy_interface[] = {
  3352. PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
  3353. PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
  3354. PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
  3355. PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
  3356. PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
  3357. };
  3358. ugeth_vdbg("%s: IN", __func__);
  3359. prop = of_get_property(np, "cell-index", NULL);
  3360. if (!prop) {
  3361. prop = of_get_property(np, "device-id", NULL);
  3362. if (!prop)
  3363. return -ENODEV;
  3364. }
  3365. ucc_num = *prop - 1;
  3366. if ((ucc_num < 0) || (ucc_num > 7))
  3367. return -ENODEV;
  3368. ug_info = &ugeth_info[ucc_num];
  3369. if (ug_info == NULL) {
  3370. if (netif_msg_probe(&debug))
  3371. ugeth_err("%s: [%d] Missing additional data!",
  3372. __func__, ucc_num);
  3373. return -ENODEV;
  3374. }
  3375. ug_info->uf_info.ucc_num = ucc_num;
  3376. sprop = of_get_property(np, "rx-clock-name", NULL);
  3377. if (sprop) {
  3378. ug_info->uf_info.rx_clock = qe_clock_source(sprop);
  3379. if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
  3380. (ug_info->uf_info.rx_clock > QE_CLK24)) {
  3381. printk(KERN_ERR
  3382. "ucc_geth: invalid rx-clock-name property\n");
  3383. return -EINVAL;
  3384. }
  3385. } else {
  3386. prop = of_get_property(np, "rx-clock", NULL);
  3387. if (!prop) {
  3388. /* If both rx-clock-name and rx-clock are missing,
  3389. we want to tell people to use rx-clock-name. */
  3390. printk(KERN_ERR
  3391. "ucc_geth: missing rx-clock-name property\n");
  3392. return -EINVAL;
  3393. }
  3394. if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
  3395. printk(KERN_ERR
  3396. "ucc_geth: invalid rx-clock propperty\n");
  3397. return -EINVAL;
  3398. }
  3399. ug_info->uf_info.rx_clock = *prop;
  3400. }
  3401. sprop = of_get_property(np, "tx-clock-name", NULL);
  3402. if (sprop) {
  3403. ug_info->uf_info.tx_clock = qe_clock_source(sprop);
  3404. if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
  3405. (ug_info->uf_info.tx_clock > QE_CLK24)) {
  3406. printk(KERN_ERR
  3407. "ucc_geth: invalid tx-clock-name property\n");
  3408. return -EINVAL;
  3409. }
  3410. } else {
  3411. prop = of_get_property(np, "tx-clock", NULL);
  3412. if (!prop) {
  3413. printk(KERN_ERR
  3414. "ucc_geth: mising tx-clock-name property\n");
  3415. return -EINVAL;
  3416. }
  3417. if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
  3418. printk(KERN_ERR
  3419. "ucc_geth: invalid tx-clock property\n");
  3420. return -EINVAL;
  3421. }
  3422. ug_info->uf_info.tx_clock = *prop;
  3423. }
  3424. err = of_address_to_resource(np, 0, &res);
  3425. if (err)
  3426. return -EINVAL;
  3427. ug_info->uf_info.regs = res.start;
  3428. ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
  3429. fixed_link = of_get_property(np, "fixed-link", NULL);
  3430. if (fixed_link) {
  3431. snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0");
  3432. ug_info->phy_address = fixed_link[0];
  3433. phy = NULL;
  3434. } else {
  3435. ph = of_get_property(np, "phy-handle", NULL);
  3436. phy = of_find_node_by_phandle(*ph);
  3437. if (phy == NULL)
  3438. return -ENODEV;
  3439. /* set the PHY address */
  3440. prop = of_get_property(phy, "reg", NULL);
  3441. if (prop == NULL)
  3442. return -1;
  3443. ug_info->phy_address = *prop;
  3444. /* Set the bus id */
  3445. mdio = of_get_parent(phy);
  3446. if (mdio == NULL)
  3447. return -1;
  3448. err = of_address_to_resource(mdio, 0, &res);
  3449. of_node_put(mdio);
  3450. if (err)
  3451. return -1;
  3452. snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start);
  3453. }
  3454. /* get the phy interface type, or default to MII */
  3455. prop = of_get_property(np, "phy-connection-type", NULL);
  3456. if (!prop) {
  3457. /* handle interface property present in old trees */
  3458. prop = of_get_property(phy, "interface", NULL);
  3459. if (prop != NULL) {
  3460. phy_interface = enet_to_phy_interface[*prop];
  3461. max_speed = enet_to_speed[*prop];
  3462. } else
  3463. phy_interface = PHY_INTERFACE_MODE_MII;
  3464. } else {
  3465. phy_interface = to_phy_interface((const char *)prop);
  3466. }
  3467. /* get speed, or derive from PHY interface */
  3468. if (max_speed == 0)
  3469. switch (phy_interface) {
  3470. case PHY_INTERFACE_MODE_GMII:
  3471. case PHY_INTERFACE_MODE_RGMII:
  3472. case PHY_INTERFACE_MODE_RGMII_ID:
  3473. case PHY_INTERFACE_MODE_RGMII_RXID:
  3474. case PHY_INTERFACE_MODE_RGMII_TXID:
  3475. case PHY_INTERFACE_MODE_TBI:
  3476. case PHY_INTERFACE_MODE_RTBI:
  3477. max_speed = SPEED_1000;
  3478. break;
  3479. default:
  3480. max_speed = SPEED_100;
  3481. break;
  3482. }
  3483. if (max_speed == SPEED_1000) {
  3484. /* configure muram FIFOs for gigabit operation */
  3485. ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
  3486. ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
  3487. ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
  3488. ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
  3489. ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
  3490. ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
  3491. ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
  3492. ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
  3493. }
  3494. if (netif_msg_probe(&debug))
  3495. printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
  3496. ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
  3497. ug_info->uf_info.irq);
  3498. /* Create an ethernet device instance */
  3499. dev = alloc_etherdev(sizeof(*ugeth));
  3500. if (dev == NULL)
  3501. return -ENOMEM;
  3502. ugeth = netdev_priv(dev);
  3503. spin_lock_init(&ugeth->lock);
  3504. /* Create CQs for hash tables */
  3505. INIT_LIST_HEAD(&ugeth->group_hash_q);
  3506. INIT_LIST_HEAD(&ugeth->ind_hash_q);
  3507. dev_set_drvdata(device, dev);
2508. /* Set the dev->base_addr to the UCC register region */
  3509. dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
  3510. SET_NETDEV_DEV(dev, device);
  3511. /* Fill in the dev structure */
  3512. uec_set_ethtool_ops(dev);
  3513. dev->open = ucc_geth_open;
  3514. dev->hard_start_xmit = ucc_geth_start_xmit;
  3515. dev->tx_timeout = ucc_geth_timeout;
  3516. dev->watchdog_timeo = TX_TIMEOUT;
  3517. netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
  3518. #ifdef CONFIG_NET_POLL_CONTROLLER
  3519. dev->poll_controller = ucc_netpoll;
  3520. #endif
  3521. dev->stop = ucc_geth_close;
  3522. // dev->change_mtu = ucc_geth_change_mtu;
  3523. dev->mtu = 1500;
  3524. dev->set_multicast_list = ucc_geth_set_multi;
  3525. ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
  3526. ugeth->phy_interface = phy_interface;
  3527. ugeth->max_speed = max_speed;
  3528. err = register_netdev(dev);
  3529. if (err) {
  3530. if (netif_msg_probe(ugeth))
  3531. ugeth_err("%s: Cannot register net device, aborting.",
  3532. dev->name);
  3533. free_netdev(dev);
  3534. return err;
  3535. }
  3536. mac_addr = of_get_mac_address(np);
  3537. if (mac_addr)
  3538. memcpy(dev->dev_addr, mac_addr, 6);
  3539. ugeth->ug_info = ug_info;
  3540. ugeth->dev = dev;
  3541. return 0;
  3542. }
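/* Remove: unregister and free the net_device and release the driver's
 * internal structures. */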
  3543. static int ucc_geth_remove(struct of_device* ofdev)
  3544. {
  3545. struct device *device = &ofdev->dev;
  3546. struct net_device *dev = dev_get_drvdata(device);
  3547. struct ucc_geth_private *ugeth = netdev_priv(dev);
  3548. unregister_netdev(dev);
  3549. free_netdev(dev);
  3550. ucc_geth_memclean(ugeth);
  3551. dev_set_drvdata(device, NULL);
  3552. return 0;
  3553. }
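/* Device tree match table: binds this driver to "ucc_geth" network nodes. */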
  3554. static struct of_device_id ucc_geth_match[] = {
  3555. {
  3556. .type = "network",
  3557. .compatible = "ucc_geth",
  3558. },
  3559. {},
  3560. };
  3561. MODULE_DEVICE_TABLE(of, ucc_geth_match);
  3562. static struct of_platform_driver ucc_geth_driver = {
  3563. .name = DRV_NAME,
  3564. .match_table = ucc_geth_match,
  3565. .probe = ucc_geth_probe,
  3566. .remove = ucc_geth_remove,
  3567. };
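/* Module init: bring up the MDIO bus, copy the primary info template into
 * each per-UCC slot, and register the OF platform driver. */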
  3568. static int __init ucc_geth_init(void)
  3569. {
  3570. int i, ret;
  3571. ret = uec_mdio_init();
  3572. if (ret)
  3573. return ret;
  3574. if (netif_msg_drv(&debug))
  3575. printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
  3576. for (i = 0; i < 8; i++)
  3577. memcpy(&(ugeth_info[i]), &ugeth_primary_info,
  3578. sizeof(ugeth_primary_info));
  3579. ret = of_register_platform_driver(&ucc_geth_driver);
  3580. if (ret)
  3581. uec_mdio_exit();
  3582. return ret;
  3583. }
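/* Module exit: unregister the platform driver and shut down the MDIO bus. */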
  3584. static void __exit ucc_geth_exit(void)
  3585. {
  3586. of_unregister_platform_driver(&ucc_geth_driver);
  3587. uec_mdio_exit();
  3588. }
  3589. module_init(ucc_geth_init);
  3590. module_exit(ucc_geth_exit);
  3591. MODULE_AUTHOR("Freescale Semiconductor, Inc");
  3592. MODULE_DESCRIPTION(DRV_DESC);
  3593. MODULE_VERSION(DRV_VERSION);
  3594. MODULE_LICENSE("GPL");