
/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
#undef DEBUG

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
/* Parenthesize the whole expression so the macro expands safely. */
#define UGETH_MSG_DEFAULT	((NETIF_MSG_IFUP << 1) - 1)

static DEFINE_SPINLOCK(ugeth_lock);

static void uec_configure_serdes(struct net_device *dev);

static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");

static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN},
	.bdRingLenRx = {
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */
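
/*
 * Atomically pop the first node off list @lh under ugeth_lock;
 * returns NULL if the list is empty.
 */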
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}
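
/*
 * Allocate a receive skb sized for the maximum RX buffer plus alignment
 * slack, align skb->data to UCC_GETH_RX_DATA_BUF_ALIGNMENT, DMA-map the
 * buffer into the buffer descriptor, and mark the BD empty (R_E) with
 * interrupt-on-completion (R_I), preserving only the wrap bit (R_W).
 */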
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->ndev;

	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem *)bd) & R_W)));

	return skb;
}
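
/*
 * Walk the RX BD ring of queue @rxQ and attach a freshly allocated skb
 * to every descriptor.  The loop ends after the descriptor whose
 * pre-existing status carried the wrap bit (R_W), i.e. the last BD in
 * the ring.
 */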
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If we cannot allocate a data buffer,
				   abort; cleanup is done elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}
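
/*
 * Populate @num_entries slots of an init-enet thread array.  For each
 * entry, claim a QE serial number (SNUM) and, unless this is the first
 * RX entry (which has no parameter page), allocate @thread_size bytes
 * of MURAM for the thread parameter RAM.  Each slot encodes
 * SNUM | MURAM offset | RISC allocation.
 */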
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}
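
/*
 * Undo fill_init_enet_entries(): for every slot that matches @risc,
 * release its SNUM and free its MURAM page (again skipping the first
 * RX entry, which never had one), then zero the slot.
 */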
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    enum qe_risc_allocation risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*p_start++ = 0;
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}
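
/*
 * Write a 6-byte MAC address into three 16-bit registers in reversed
 * byte order (reg[0] holds bytes 5:4, reg[2] holds bytes 1:0), which
 * is the layout the byte-swapping callers below rely on.
 */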
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}
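
/*
 * Disable one exact-match station address slot by writing the all-ones
 * address ff:ff:ff:ff:ff:ff into its filter entry.
 */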
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}
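
/*
 * Add an address to the hardware group hash filter: latch the
 * byte-reversed address into the temporary address register, then have
 * the QE fold it into the hash via the SET_GROUP_ADDRESS command.
 */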
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode; therefore,
	   to insert the address into the hash (Big Endian mode), we
	   reverse the bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

#ifdef CONFIG_UGETH_MAGIC_PACKET
static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth __iomem *ug_regs;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Enable interrupts for magic packet detection */
	setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);

	/* Enable magic packet detection */
	setbits32(&ug_regs->maccfg2, MACCFG2_MPE);
}

static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth __iomem *ug_regs;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Disable interrupts for magic packet detection */
	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);

	/* Disable magic packet detection */
	clrbits32(&ug_regs->maccfg2, MACCFG2_MPE);
}
#endif /* CONFIG_UGETH_MAGIC_PACKET */

static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}
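
/*
 * Snapshot the driver's statistics into the caller-supplied structures.
 * TX and RX firmware counters are copied only when the corresponding
 * parameter RAM was set up, and hardware counters only when UPSMR.HSE
 * shows that hardware statistics gathering is enabled.
 */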
#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	gathers hardware statistics */
	if (hardware_statistics &&
	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1    : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2    : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg     : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup     : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat     : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar    : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr      : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf       : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));

	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder      : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr        : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate       : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
			   ugeth->p_tx_glbl_pram->iphoffset[0]);
		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
			   ugeth->p_tx_glbl_pram->iphoffset[1]);
		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
			   ugeth->p_tx_glbl_pram->iphoffset[2]);
		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
			   ugeth->p_tx_glbl_pram->iphoffset[3]);
		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
			   ugeth->p_tx_glbl_pram->iphoffset[4]);
		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
			   ugeth->p_tx_glbl_pram->iphoffset[5]);
		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
			   ugeth->p_tx_glbl_pram->iphoffset[6]);
		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
			   ugeth->p_tx_glbl_pram->iphoffset[7]);
		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		ugeth_info("tqptr        : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr           : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen       : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack       : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr   : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate          : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr           : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr            : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr          : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1           : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2           : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt            : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		ugeth_info("l3qt[0]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		ugeth_info("l3qt[1]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		ugeth_info("l3qt[2]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		ugeth_info("l3qt[3]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		ugeth_info("l3qt[4]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		ugeth_info("l3qt[5]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		ugeth_info("l3qt[6]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		ugeth_info("l3qt[7]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		ugeth_info("vlantype        : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci         : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam  : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(struct ucc_geth_send_queue_qd));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_irq_coalescing_tbl->
				   coalescingentry[i]);
			ugeth_info
			    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
			    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr        : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr            : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr    : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32)
				   qe_muram_addr(in_be32
						 (&ugeth->p_rx_bd_qs_tbl[i].
						  bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */

static void init_default_reg_vals(u32 __iomem *upsmr_register,
				  u32 __iomem *maccfg1_register,
				  u32 __iomem *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}
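
/*
 * Validate the half-duplex parameters against their field maxima and
 * pack them, together with the four mode flags, into the HAFDUP
 * register.
 */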
static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   u32 __iomem *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}
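
/*
 * Pack the four inter-frame gap fields into the IPGIFG register,
 * rejecting values that exceed their field widths or a part-1 gap
 * larger than part 2.
 */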
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       u32 __iomem *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}
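
/* Program pause-frame timing into UEMPR, enable the requested
 * automatic flow-control mode in UPSMR, and switch Rx/Tx flow
 * control on in MACCFG1.
 */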
int init_flow_control_params(u32 automatic_flow_control_mode,
			     int rx_flow_control_enable,
			     int tx_flow_control_enable,
			     u16 pause_period,
			     u16 extension_field,
			     u32 __iomem *upsmr_register,
			     u32 __iomem *uempr_register,
			     u32 __iomem *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	setbits32(upsmr_register, automatic_flow_control_mode);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}
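
/* Optionally enable hardware statistics gathering (UPSMR.HSE), clear
 * the hardware counters via UESCR.CLRCNT, and optionally arm
 * auto-zero-on-read (UESCR.AUTOZ).
 */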
static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     u32 __iomem *upsmr_register,
					     u16 __iomem *uescr_register)
{
	u16 uescr_value = 0;

	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics)
		setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}
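
/* Point the Tx/Rx RMON base pointers at the firmware statistics
 * structures and set the corresponding enable bits in TEMODER and
 * REMODER. Callers must pass valid pointers (see the note below).
 */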
static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		u32 __iomem *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		u32 __iomem *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		u16 __iomem *temoder_register,
		u32 __iomem *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
	}

	return 0;
}

static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      u32 __iomem *macstnaddr1_register,
				      u32 __iomem *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 5     station address byte 4     */
	/* 16                     23  24                     31  */
	/* station address byte 3     station address byte 2     */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 1     station address byte 0     */
	/* 16                     23  24                     31  */
	/* reserved                   reserved                   */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}

static int init_check_frame_length_mode(int length_check,
					u32 __iomem *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_preamble_length(u8 preamble_length,
				u32 __iomem *maccfg2_register)
{
	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
			preamble_length << MACCFG2_PREL_SHIFT);

	return 0;
}
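
/* Update the broadcast-reject, short-frame and promiscuous bits in
 * UPSMR with a read-modify-write so the other mode bits are preserved.
 */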
static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, u32 __iomem *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UCC_GETH_UPSMR_BRO;
	else
		value &= ~UCC_GETH_UPSMR_BRO;

	if (receive_short_frames)
		value |= UCC_GETH_UPSMR_RSH;
	else
		value &= ~UCC_GETH_UPSMR_RSH;

	if (promiscuous)
		value |= UCC_GETH_UPSMR_PRO;
	else
		value &= ~UCC_GETH_UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}

static int init_max_rx_buff_len(u16 max_rx_buf_len,
				u16 __iomem *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0) ||
	    (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}

static int init_min_frame_len(u16 min_frame_length,
			      u16 __iomem *minflr_register,
			      u16 __iomem *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}
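
/* Program MACCFG2 and UPSMR to match the PHY interface mode and the
 * maximum speed of the link (nibble mode for 10/100, byte mode for
 * 1000), turn off TBI autonegotiation where applicable, and set the
 * frame-length-check and preamble-length fields.
 */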
static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_info *ug_info;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_fast __iomem *uf_regs;
	int ret_val;
	u32 upsmr, maccfg2, tbiBaseAddress;
	u16 value;

	ugeth_vdbg("%s: IN", __func__);

	ug_info = ugeth->ug_info;
	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	/*                    Set MACCFG2                    */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
	if ((ugeth->max_speed == SPEED_10) ||
	    (ugeth->max_speed == SPEED_100))
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
	else if (ugeth->max_speed == SPEED_1000)
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
	maccfg2 |= ug_info->padAndCrc;
	out_be32(&ug_regs->maccfg2, maccfg2);

	/*                    Set UPSMR                      */
	upsmr = in_be32(&uf_regs->upsmr);
	upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
		   UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
			upsmr |= UCC_GETH_UPSMR_RPM;
		switch (ugeth->max_speed) {
		case SPEED_10:
			upsmr |= UCC_GETH_UPSMR_R10M;
			/* FALLTHROUGH */
		case SPEED_100:
			if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
				upsmr |= UCC_GETH_UPSMR_RMM;
		}
	}
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		upsmr |= UCC_GETH_UPSMR_TBIM;
	}
	if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
		upsmr |= UCC_GETH_UPSMR_SGMM;

	out_be32(&uf_regs->upsmr, upsmr);

	/* Disable autonegotiation in tbi mode, because by default it
	   comes up in autonegotiation mode. */
	/* Note that this depends on proper setting in utbipar register. */
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		tbiBaseAddress = in_be32(&ug_regs->utbipar);
		tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
		tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
		value = ugeth->phydev->bus->read(ugeth->phydev->bus,
				(u8) tbiBaseAddress, ENET_TBI_MII_CR);
		value &= ~0x1000;	/* Turn off autonegotiation */
		ugeth->phydev->bus->write(ugeth->phydev->bus,
				(u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
	}

	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);

	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
	if (ret_val != 0) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
				  __func__);
		return ret_val;
	}

	return 0;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the ugeth structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_geth __iomem *ug_regs;
	struct ucc_fast __iomem *uf_regs;
	struct phy_device *phydev = ugeth->phydev;
	unsigned long flags;
	int new_state = 0;

	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	spin_lock_irqsave(&ugeth->lock, flags);

	if (phydev->link) {
		u32 tempval = in_be32(&ug_regs->maccfg2);
		u32 upsmr = in_be32(&uf_regs->upsmr);
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != ugeth->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FDX);
			else
				tempval |= MACCFG2_FDX;
			ugeth->oldduplex = phydev->duplex;
		}

		if (phydev->speed != ugeth->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_1000:
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					    MACCFG2_INTERFACE_MODE_BYTE);
				break;
			case SPEED_100:
			case SPEED_10:
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					    MACCFG2_INTERFACE_MODE_NIBBLE);
				/* if reduced mode, re-set UPSMR.R10M */
				if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
					if (phydev->speed == SPEED_10)
						upsmr |= UCC_GETH_UPSMR_R10M;
					else
						upsmr &= ~UCC_GETH_UPSMR_R10M;
				}
				break;
			default:
				if (netif_msg_link(ugeth))
					ugeth_warn(
						"%s: Ack!  Speed (%d) is not 10/100/1000!",
						dev->name, phydev->speed);
				break;
			}
			ugeth->oldspeed = phydev->speed;
		}

		out_be32(&ug_regs->maccfg2, tempval);
		out_be32(&uf_regs->upsmr, upsmr);

		if (!ugeth->oldlink) {
			new_state = 1;
			ugeth->oldlink = 1;
		}
	} else if (ugeth->oldlink) {
		new_state = 1;
		ugeth->oldlink = 0;
		ugeth->oldspeed = 0;
		ugeth->oldduplex = -1;
	}

	if (new_state && netif_msg_link(ugeth))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&ugeth->lock, flags);
}

/* Configure the PHY for dev.
 * Returns 0 on success, -ENODEV on failure.
 */
static int init_phy(struct net_device *dev)
{
	struct ucc_geth_private *priv = netdev_priv(dev);
	struct ucc_geth_info *ug_info = priv->ug_info;
	struct phy_device *phydev;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (!ug_info->phy_node)
		return 0;

	phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		printk("%s: Could not attach to PHY\n", dev->name);
		return -ENODEV;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
		uec_configure_serdes(dev);

	phydev->supported &= (ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full |
			      ADVERTISED_100baseT_Half |
			      ADVERTISED_100baseT_Full);

	if (priv->max_speed == SPEED_1000)
		phydev->supported |= ADVERTISED_1000baseT_Full;

	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the UTBIPA register.  We assume
 * that the UTBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void uec_configure_serdes(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	if (!ugeth->tbiphy) {
		printk(KERN_WARNING "SGMII mode requires that the device "
			"tree specify a tbi-handle\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(ugeth->tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(ugeth->tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);

	phy_write(ugeth->tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

	phy_write(ugeth->tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
}
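
/* Gracefully stop the Tx side of the controller: mask and clear the
 * GRACEFUL STOP TX event, issue the QE host command, then poll UCCE
 * for the GRA acknowledgement (10 ms per try, up to 10 tries).
 */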
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u32 temp;
	int i = 10;

	uccf = ugeth->uccf;

	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
	out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA);  /* clear by writing 1 */

	/* Issue host command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		msleep(10);
		temp = in_be32(uccf->p_ucce);
	} while (!(temp & UCC_GETH_UCCE_GRA) && --i);

	uccf->stopped_tx = 1;

	return 0;
}
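
/* Gracefully stop the Rx side: clear the acknowledge bit in the Rx
 * global parameter RAM, then repeatedly issue the QE host command
 * until the hardware asserts the acknowledge, as the spec requires.
 */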
static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u8 temp;
	int i = 10;

	uccf = ugeth->uccf;

	/* Clear acknowledge bit */
	temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);

	/* Keep issuing command and checking acknowledge bit until
	   it is asserted, according to spec */
	do {
		/* Issue host command */
		cecr_subblock =
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);
		msleep(10);
		temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);

	uccf->stopped_rx = 1;

	return 0;
}
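
/* Restart a previously stopped direction by issuing the matching
 * QE RESTART command and clearing the stopped flag.
 */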
static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
	uccf->stopped_tx = 0;

	return 0;
}

static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     0);
	uccf->stopped_rx = 0;

	return 0;
}
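
/* Re-enable one or both directions: restart Tx/Rx if they had been
 * gracefully stopped, then enable the fast UCC (harmless if it was
 * never disabled).
 */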
static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;
	int enabled_tx, enabled_rx;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __func__);
		return -EINVAL;
	}

	enabled_tx = uccf->enabled_tx;
	enabled_rx = uccf->enabled_rx;

	/* Get Tx and Rx going again, in case this channel was actively
	   disabled. */
	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
		ugeth_restart_tx(ugeth);
	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
		ugeth_restart_rx(ugeth);

	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */

	return 0;
}

static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __func__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
		ugeth_graceful_stop_tx(ugeth);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
		ugeth_graceful_stop_rx(ugeth);

	ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */

	return 0;
}

static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
{
#ifdef DEBUG
	ucc_fast_dump_regs(ugeth->uccf);
	dump_regs(ugeth);
	dump_bds(ugeth);
#endif
}
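
/* Clear every address of the given type (group or individual) from
 * the 82xx-style hash filter: quiesce the channel if it is running,
 * zero the hash registers, release all queued address containers,
 * then re-enable the channel.
 */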
static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
						       ugeth,
						       enum enet_addr_type
						       enet_addr_type)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	struct ucc_fast_private *uccf;
	enum comm_dir comm_dir;
	struct list_head *p_lh;
	u16 i, num;
	u32 __iomem *addr_h;
	u32 __iomem *addr_l;
	u8 *p_counter;

	uccf = ugeth->uccf;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
		addr_h = &(p_82xx_addr_filt->gaddr_h);
		addr_l = &(p_82xx_addr_filt->gaddr_l);
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
		addr_h = &(p_82xx_addr_filt->iaddr_h);
		addr_l = &(p_82xx_addr_filt->iaddr_l);
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	} else
		return -EINVAL;

	comm_dir = 0;
	if (uccf->enabled_tx)
		comm_dir |= COMM_DIR_TX;
	if (uccf->enabled_rx)
		comm_dir |= COMM_DIR_RX;
	if (comm_dir)
		ugeth_disable(ugeth, comm_dir);

	/* Clear the hash table. */
	out_be32(addr_h, 0x00000000);
	out_be32(addr_l, 0x00000000);

	if (!p_lh)
		return 0;

	num = *p_counter;

	/* Delete all remaining CQ elements */
	for (i = 0; i < num; i++)
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));

	*p_counter = 0;

	if (comm_dir)
		ugeth_enable(ugeth, comm_dir);

	return 0;
}

static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
						    u8 paddr_num)
{
	ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
	return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
}
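
/* Release everything ucc_geth_startup() allocated, roughly in reverse
 * order: the fast UCC, all MURAM parameter RAM blocks, the init-enet
 * shadow, the BD rings and their socket buffers (with DMA unmapping),
 * the hash queues, and the register mapping.
 */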
static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
{
	u16 i, j;
	u8 __iomem *bd;

	if (!ugeth)
		return;

	if (ugeth->uccf) {
		ucc_fast_free(ugeth->uccf);
		ugeth->uccf = NULL;
	}

	if (ugeth->p_thread_data_tx) {
		qe_muram_free(ugeth->thread_dat_tx_offset);
		ugeth->p_thread_data_tx = NULL;
	}
	if (ugeth->p_thread_data_rx) {
		qe_muram_free(ugeth->thread_dat_rx_offset);
		ugeth->p_thread_data_rx = NULL;
	}
	if (ugeth->p_exf_glbl_param) {
		qe_muram_free(ugeth->exf_glbl_param_offset);
		ugeth->p_exf_glbl_param = NULL;
	}
	if (ugeth->p_rx_glbl_pram) {
		qe_muram_free(ugeth->rx_glbl_pram_offset);
		ugeth->p_rx_glbl_pram = NULL;
	}
	if (ugeth->p_tx_glbl_pram) {
		qe_muram_free(ugeth->tx_glbl_pram_offset);
		ugeth->p_tx_glbl_pram = NULL;
	}
	if (ugeth->p_send_q_mem_reg) {
		qe_muram_free(ugeth->send_q_mem_reg_offset);
		ugeth->p_send_q_mem_reg = NULL;
	}
	if (ugeth->p_scheduler) {
		qe_muram_free(ugeth->scheduler_offset);
		ugeth->p_scheduler = NULL;
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
		ugeth->p_tx_fw_statistics_pram = NULL;
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
		ugeth->p_rx_fw_statistics_pram = NULL;
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
		ugeth->p_rx_irq_coalescing_tbl = NULL;
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
		ugeth->p_rx_bd_qs_tbl = NULL;
	}
	if (ugeth->p_init_enet_param_shadow) {
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   rxthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_RX,
					 ugeth->ug_info->riscRx, 1);
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   txthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_TX,
					 ugeth->ug_info->riscTx, 0);
		kfree(ugeth->p_init_enet_param_shadow);
		ugeth->p_init_enet_param_shadow = NULL;
	}
	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		bd = ugeth->p_tx_bd_ring[i];
		if (!bd)
			continue;
		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
			if (ugeth->tx_skbuff[i][j]) {
				dma_unmap_single(ugeth->dev,
						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
						 (in_be32((u32 __iomem *)bd) &
						  BD_LENGTH_MASK),
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
				ugeth->tx_skbuff[i][j] = NULL;
			}
		}

		kfree(ugeth->tx_skbuff[i]);

		if (ugeth->p_tx_bd_ring[i]) {
			if (ugeth->ug_info->uf_info.bd_mem_part ==
			    MEM_PART_SYSTEM)
				kfree((void *)ugeth->tx_bd_ring_offset[i]);
			else if (ugeth->ug_info->uf_info.bd_mem_part ==
				 MEM_PART_MURAM)
				qe_muram_free(ugeth->tx_bd_ring_offset[i]);
			ugeth->p_tx_bd_ring[i] = NULL;
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			/* Return existing data buffers in ring */
			bd = ugeth->p_rx_bd_ring[i];
			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
				if (ugeth->rx_skbuff[i][j]) {
					dma_unmap_single(ugeth->dev,
						in_be32(&((struct qe_bd __iomem *)bd)->buf),
						ugeth->ug_info->
						uf_info.max_rx_buf_length +
						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
						DMA_FROM_DEVICE);
					dev_kfree_skb_any(
						ugeth->rx_skbuff[i][j]);
					ugeth->rx_skbuff[i][j] = NULL;
				}
				bd += sizeof(struct qe_bd);
			}

			kfree(ugeth->rx_skbuff[i]);

			if (ugeth->ug_info->uf_info.bd_mem_part ==
			    MEM_PART_SYSTEM)
				kfree((void *)ugeth->rx_bd_ring_offset[i]);
			else if (ugeth->ug_info->uf_info.bd_mem_part ==
				 MEM_PART_MURAM)
				qe_muram_free(ugeth->rx_bd_ring_offset[i]);
			ugeth->p_rx_bd_ring[i] = NULL;
		}
	}
	while (!list_empty(&ugeth->group_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->group_hash_q)));
	while (!list_empty(&ugeth->ind_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->ind_hash_q)));
	if (ugeth->ug_regs) {
		iounmap(ugeth->ug_regs);
		ugeth->ug_regs = NULL;
	}
}
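
/* Program the multicast/promiscuous receive filter from the netdev
 * flags and multicast list: IFF_PROMISC sets UPSMR.PRO, IFF_ALLMULTI
 * fills the group hash with all ones, and otherwise each group
 * address in the list is hashed into the filter by the CPM.
 */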
static void ucc_geth_set_multi(struct net_device *dev)
{
	struct ucc_geth_private *ugeth;
	struct dev_mc_list *dmi;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	int i;

	ugeth = netdev_priv(dev);

	uf_regs = ugeth->uccf->uf_regs;

	if (dev->flags & IFF_PROMISC) {
		setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
	} else {
		clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);

		p_82xx_addr_filt =
		    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
		    p_rx_glbl_pram->addressfiltering;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
			out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
		} else {
			/* Clear filter and add the addresses in the list.
			 */
			out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
			out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);

			dmi = dev->mc_list;

			for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {

				/* Only support group multicast for now.
				 */
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* Ask CPM to run CRC and set bit in
				 * filter mask.
				 */
				hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
			}
		}
	}
}
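
/* Shut the controller down: gracefully stop both directions, stop the
 * PHY, mask and clear all interrupts, clear the MAC Rx/Tx enables,
 * disconnect the PHY and free every resource via ucc_geth_memclean().
 */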
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
	struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
	struct phy_device *phydev = ugeth->phydev;

	ugeth_vdbg("%s: IN", __func__);

	/* Disable the controller */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

	/* Tell the kernel the link is down */
	phy_stop(phydev);

	/* Mask all interrupts */
	out_be32(ugeth->uccf->p_uccm, 0x00000000);

	/* Clear all interrupts */
	out_be32(ugeth->uccf->p_ucce, 0xffffffff);

	/* Disable Rx and Tx */
	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);

	phy_disconnect(ugeth->phydev);
	ugeth->phydev = NULL;

	ucc_geth_memclean(ugeth);
}
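
/* Sanity-check the configuration in ug_info (memory partition, BD ring
 * lengths and alignment, buffer length, queue counts, priority tables,
 * filtering constraints), build the receive event mask, then bring up
 * the fast UCC and map the geth register block.
 */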
static int ucc_struct_init(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_info *ug_info;
	struct ucc_fast_info *uf_info;
	int i;

	ug_info = ugeth->ug_info;
	uf_info = &ug_info->uf_info;

	if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
	      (uf_info->bd_mem_part == MEM_PART_MURAM))) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Bad memory partition value.",
				  __func__);
		return -EINVAL;
	}

	/* Rx BD lengths */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
		    (ug_info->bdRingLenRx[i] %
		     UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
				     __func__);
			return -EINVAL;
		}
	}

	/* Tx BD lengths */
	for (i = 0; i < ug_info->numQueuesTx; i++) {
		if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: Tx BD ring length must be no smaller than 2.",
				     __func__);
			return -EINVAL;
		}
	}

	/* mrblr */
	if ((uf_info->max_rx_buf_length == 0) ||
	    (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
		if (netif_msg_probe(ugeth))
			ugeth_err
			    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
			     __func__);
		return -EINVAL;
	}

	/* num Tx queues */
	if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: number of tx queues too large.", __func__);
		return -EINVAL;
	}

	/* num Rx queues */
	if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: number of rx queues too large.", __func__);
		return -EINVAL;
	}

	/* l2qt */
	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
		if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: VLAN priority table entry must not be"
				     " larger than number of Rx queues.",
				     __func__);
			return -EINVAL;
		}
	}

	/* l3qt */
	for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
		if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: IP priority table entry must not be"
				     " larger than number of Rx queues.",
				     __func__);
			return -EINVAL;
		}
	}

	if (ug_info->cam && !ug_info->ecamptr) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
				  __func__);
		return -EINVAL;
	}

	if ((ug_info->numStationAddresses !=
	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
	    ug_info->rxExtendedFiltering) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Number of station addresses greater than 1 "
				  "not allowed in extended parsing mode.",
				  __func__);
		return -EINVAL;
	}

	/* Generate uccm_mask for receive */
	uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;	/* Errors */
	for (i = 0; i < ug_info->numQueuesRx; i++)
		uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);

	for (i = 0; i < ug_info->numQueuesTx; i++)
		uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);

	/* Initialize the general fast UCC block. */
	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Failed to init uccf.", __func__);
		return -ENOMEM;
	}

	ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
	if (!ugeth->ug_regs) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Failed to ioremap regs.", __func__);
		return -ENOMEM;
	}

	return 0;
}
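
/* Allocate and initialize everything the controller needs to run:
 * default register values, Tx/Rx BD rings and skbuff arrays, the Tx
 * and Rx global parameter RAM pages in MURAM (thread data, send queue,
 * scheduler, statistics, interrupt coalescing, BD queue table), and
 * finally the extended or 82xx-style address filtering structures.
 */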
static int ucc_geth_startup(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	struct ucc_geth_init_pram __iomem *p_init_enet_pram;
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	struct ucc_fast_info *uf_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	int ret_val = -EINVAL;
	u32 remoder = UCC_GETH_REMODER_INIT;
	u32 init_enet_pram_offset, cecr_subblock, command;
	u32 ifstat, i, j, size, l2qt, l3qt, length;
	u16 temoder = UCC_GETH_TEMODER_INIT;
	u16 test;
	u8 function_code = 0;
	u8 __iomem *bd;
	u8 __iomem *endOfRing;
	u8 numThreadsRxNumerical, numThreadsTxNumerical;

	ugeth_vdbg("%s: IN", __func__);
	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;
	uf_info = &ug_info->uf_info;
	uf_regs = uccf->uf_regs;
	ug_regs = ugeth->ug_regs;

	switch (ug_info->numThreadsRx) {
	case UCC_GETH_NUM_OF_THREADS_1:
		numThreadsRxNumerical = 1;
		break;
	case UCC_GETH_NUM_OF_THREADS_2:
		numThreadsRxNumerical = 2;
		break;
	case UCC_GETH_NUM_OF_THREADS_4:
		numThreadsRxNumerical = 4;
		break;
	case UCC_GETH_NUM_OF_THREADS_6:
		numThreadsRxNumerical = 6;
		break;
	case UCC_GETH_NUM_OF_THREADS_8:
		numThreadsRxNumerical = 8;
		break;
	default:
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Bad number of Rx threads value.",
				  __func__);
		return -EINVAL;
	}

	switch (ug_info->numThreadsTx) {
	case UCC_GETH_NUM_OF_THREADS_1:
		numThreadsTxNumerical = 1;
		break;
	case UCC_GETH_NUM_OF_THREADS_2:
		numThreadsTxNumerical = 2;
		break;
	case UCC_GETH_NUM_OF_THREADS_4:
		numThreadsTxNumerical = 4;
		break;
	case UCC_GETH_NUM_OF_THREADS_6:
		numThreadsTxNumerical = 6;
		break;
	case UCC_GETH_NUM_OF_THREADS_8:
		numThreadsTxNumerical = 8;
		break;
	default:
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Bad number of Tx threads value.",
				  __func__);
		return -EINVAL;
	}

	/* Calculate rx_extended_features */
	ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
		ug_info->ipAddressAlignment ||
		(ug_info->numStationAddresses !=
		 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);

	ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
		(ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
		(ug_info->vlanOperationNonTagged !=
		 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);

	init_default_reg_vals(&uf_regs->upsmr,
			      &ug_regs->maccfg1, &ug_regs->maccfg2);

	/*                    Set UPSMR                      */
	/* For more details see the hardware spec.           */
	init_rx_parameters(ug_info->bro,
			   ug_info->rsh, ug_info->pro, &uf_regs->upsmr);

	/* We're going to ignore other registers for now, */
	/* except as needed to get up and running */

	/*                    Set MACCFG1                    */
	/* For more details see the hardware spec.           */
	init_flow_control_params(ug_info->aufc,
				 ug_info->receiveFlowControl,
				 ug_info->transmitFlowControl,
				 ug_info->pausePeriod,
				 ug_info->extensionField,
				 &uf_regs->upsmr,
				 &ug_regs->uempr, &ug_regs->maccfg1);

	setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);

	/*                    Set IPGIFG                     */
	/* For more details see the hardware spec.           */
	ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
					      ug_info->nonBackToBackIfgPart2,
					      ug_info->
					      miminumInterFrameGapEnforcement,
					      ug_info->backToBackInterFrameGap,
					      &ug_regs->ipgifg);
	if (ret_val != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: IPGIFG initialization parameter too large.",
				  __func__);
		return ret_val;
	}

	/*                    Set HAFDUP                     */
	/* For more details see the hardware spec.           */
	ret_val = init_half_duplex_params(ug_info->altBeb,
					  ug_info->backPressureNoBackoff,
					  ug_info->noBackoff,
					  ug_info->excessDefer,
					  ug_info->altBebTruncation,
					  ug_info->maxRetransmission,
					  ug_info->collisionWindow,
					  &ug_regs->hafdup);
	if (ret_val != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Half Duplex initialization parameter too large.",
				  __func__);
		return ret_val;
	}

	/*                    Set IFSTAT                     */
	/* For more details see the hardware spec.           */
	/* Read only - resets upon read                      */
	ifstat = in_be32(&ug_regs->ifstat);

	/*                    Clear UEMPR                    */
	/* For more details see the hardware spec.           */
	out_be32(&ug_regs->uempr, 0);

	/*                    Set UESCR                      */
	/* For more details see the hardware spec.           */
	init_hw_statistics_gathering_mode((ug_info->statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
		0, &uf_regs->upsmr, &ug_regs->uescr);

	/* Allocate Tx bds */
	for (j = 0; j < ug_info->numQueuesTx; j++) {
		/* Allocate in multiple of
		   UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
		   according to spec */
		length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
			  / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
		    * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
		if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
		    UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
			length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
			u32 align = 4;
			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
			ugeth->tx_bd_ring_offset[j] =
			    (u32) kmalloc((u32) (length + align), GFP_KERNEL);

			if (ugeth->tx_bd_ring_offset[j] != 0)
				ugeth->p_tx_bd_ring[j] =
				    (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
				    align) & ~(align - 1));
		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
			ugeth->tx_bd_ring_offset[j] =
			    qe_muram_alloc(length,
					   UCC_GETH_TX_BD_RING_ALIGNMENT);
			if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
				ugeth->p_tx_bd_ring[j] =
				    (u8 __iomem *) qe_muram_addr(ugeth->
							 tx_bd_ring_offset[j]);
		}
		if (!ugeth->p_tx_bd_ring[j]) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate memory for Tx bd rings.",
				     __func__);
			return -ENOMEM;
		}
		/* Zero unused end of bd ring, according to spec */
		memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
			  ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
			  length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
	}

	/* Allocate Rx bds */
	for (j = 0; j < ug_info->numQueuesRx; j++) {
		length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
			u32 align = 4;
			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
			ugeth->rx_bd_ring_offset[j] =
			    (u32) kmalloc((u32) (length + align), GFP_KERNEL);
			if (ugeth->rx_bd_ring_offset[j] != 0)
				ugeth->p_rx_bd_ring[j] =
				    (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
				    align) & ~(align - 1));
		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
			ugeth->rx_bd_ring_offset[j] =
			    qe_muram_alloc(length,
					   UCC_GETH_RX_BD_RING_ALIGNMENT);
			if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
				ugeth->p_rx_bd_ring[j] =
				    (u8 __iomem *) qe_muram_addr(ugeth->
							 rx_bd_ring_offset[j]);
		}
		if (!ugeth->p_rx_bd_ring[j]) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate memory for Rx bd rings.",
				     __func__);
			return -ENOMEM;
		}
	}

	/* Init Tx bds */
	for (j = 0; j < ug_info->numQueuesTx; j++) {
		/* Setup the skbuff rings */
		ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
					      ugeth->ug_info->bdRingLenTx[j],
					      GFP_KERNEL);

		if (ugeth->tx_skbuff[j] == NULL) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Could not allocate tx_skbuff",
					  __func__);
			return -ENOMEM;
		}

		for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
			ugeth->tx_skbuff[j][i] = NULL;

		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
			/* clear bd buffer */
			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
			/* set bd status and length */
			out_be32((u32 __iomem *)bd, 0);
			bd += sizeof(struct qe_bd);
		}
		bd -= sizeof(struct qe_bd);
		/* set bd status and length */
		out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
	}

	/* Init Rx bds */
	for (j = 0; j < ug_info->numQueuesRx; j++) {
		/* Setup the skbuff rings */
		ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
					      ugeth->ug_info->bdRingLenRx[j],
					      GFP_KERNEL);

		if (ugeth->rx_skbuff[j] == NULL) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Could not allocate rx_skbuff",
					  __func__);
			return -ENOMEM;
		}

		for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
			ugeth->rx_skbuff[j][i] = NULL;

		ugeth->skb_currx[j] = 0;
		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
			/* set bd status and length */
			out_be32((u32 __iomem *)bd, R_I);
			/* clear bd buffer */
			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
			bd += sizeof(struct qe_bd);
		}
		bd -= sizeof(struct qe_bd);
		/* set bd status and length */
		out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
	}

	/*
	 * Global PRAM
	 */
	/* Tx global PRAM */
	/* Allocate global tx parameter RAM page */
	ugeth->tx_glbl_pram_offset =
	    qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
			   UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_tx_glbl_pram =
	    (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
							tx_glbl_pram_offset);
	/* Zero out p_tx_glbl_pram */
	memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0,
		  sizeof(struct ucc_geth_tx_global_pram));
	/* Fill global PRAM */

	/* TQPTR */
	/* Size varies with number of Tx threads */
	ugeth->thread_dat_tx_offset =
	    qe_muram_alloc(numThreadsTxNumerical *
			   sizeof(struct ucc_geth_thread_data_tx) +
			   32 * (numThreadsTxNumerical == 1),
			   UCC_GETH_THREAD_DATA_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
			     __func__);
		return -ENOMEM;
	}

	ugeth->p_thread_data_tx =
	    (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
							thread_dat_tx_offset);
	out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);

	/* vtagtable */
	for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
			 ug_info->vtagtable[i]);

	/* iphoffset */
	for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
		out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
		      ug_info->iphoffset[i]);

	/* SQPTR */
	/* Size varies with number of Tx queues */
	ugeth->send_q_mem_reg_offset =
	    qe_muram_alloc(ug_info->numQueuesTx *
			   sizeof(struct ucc_geth_send_queue_qd),
			   UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
			     __func__);
		return -ENOMEM;
	}

	ugeth->p_send_q_mem_reg =
	    (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
							send_q_mem_reg_offset);
	out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);

	/* Setup the table */
	/* Assume BD rings are already established */
	for (i = 0; i < ug_info->numQueuesTx; i++) {
		endOfRing =
		    ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
					      1) * sizeof(struct qe_bd);
		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
				 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
				 last_bd_completed_address,
				 (u32) virt_to_phys(endOfRing));
		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
			   MEM_PART_MURAM) {
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
				 (u32) immrbar_virt_to_phys(ugeth->
							    p_tx_bd_ring[i]));
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
				 last_bd_completed_address,
				 (u32) immrbar_virt_to_phys(endOfRing));
		}
	}

	/* schedulerbasepointer */

	if (ug_info->numQueuesTx > 1) {
		/* scheduler exists only if more than 1 tx queue */
		ugeth->scheduler_offset =
		    qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
				   UCC_GETH_SCHEDULER_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate DPRAM memory for p_scheduler.",
				     __func__);
			return -ENOMEM;
		}

		ugeth->p_scheduler =
		    (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
							   scheduler_offset);
		out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
			 ugeth->scheduler_offset);
		/* Zero out p_scheduler */
		memset_io((void __iomem *)ugeth->p_scheduler, 0,
			  sizeof(struct ucc_geth_scheduler));

		/* Set values in scheduler */
		out_be32(&ugeth->p_scheduler->mblinterval,
			 ug_info->mblinterval);
		out_be16(&ugeth->p_scheduler->nortsrbytetime,
			 ug_info->nortsrbytetime);
		out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
		out_8(&ugeth->p_scheduler->strictpriorityq,
		      ug_info->strictpriorityq);
		out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
		out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
		for (i = 0; i < NUM_TX_QUEUES; i++)
			out_8(&ugeth->p_scheduler->weightfactor[i],
			      ug_info->weightfactor[i]);

		/* Set pointers to cpucount registers in scheduler */
		ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
		ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
		ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
		ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
		ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
		ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
		ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
		ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
	}

	/* schedulerbasepointer */
	/* TxRMON_PTR (statistics) */
	if (ug_info->
	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
		ugeth->tx_fw_statistics_pram_offset =
		    qe_muram_alloc(sizeof
				   (struct ucc_geth_tx_firmware_statistics_pram),
				   UCC_GETH_TX_STATISTICS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate DPRAM memory for"
				     " p_tx_fw_statistics_pram.",
				     __func__);
			return -ENOMEM;
		}
		ugeth->p_tx_fw_statistics_pram =
		    (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
		    qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
		/* Zero out p_tx_fw_statistics_pram */
		memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
			  0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
	}

	/* temoder */
	/* Already has speed set */
	if (ug_info->numQueuesTx > 1)
		temoder |= TEMODER_SCHEDULER_ENABLE;
	if (ug_info->ipCheckSumGenerate)
		temoder |= TEMODER_IP_CHECKSUM_GENERATE;
	temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
	out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);

	test = in_be16(&ugeth->p_tx_glbl_pram->temoder);

	/* Function code register value to be used later */
	function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
	/* Required for QE */

	/* function code register */
	out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);

	/* Rx global PRAM */
	/* Allocate global rx parameter RAM page */
	ugeth->rx_glbl_pram_offset =
	    qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
			   UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_rx_glbl_pram =
	    (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
							rx_glbl_pram_offset);
	/* Zero out p_rx_glbl_pram */
	memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0,
		  sizeof(struct ucc_geth_rx_global_pram));
	/* Fill global PRAM */

	/* RQPTR */
	/* Size varies with number of Rx threads */
	ugeth->thread_dat_rx_offset =
	    qe_muram_alloc(numThreadsRxNumerical *
			   sizeof(struct ucc_geth_thread_data_rx),
			   UCC_GETH_THREAD_DATA_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
			     __func__);
		return -ENOMEM;
	}

	ugeth->p_thread_data_rx =
	    (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
							thread_dat_rx_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);

	/* typeorlen */
	out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);

	/* rxrmonbaseptr (statistics) */
	if (ug_info->
	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
		ugeth->rx_fw_statistics_pram_offset =
		    qe_muram_alloc(sizeof
				   (struct ucc_geth_rx_firmware_statistics_pram),
				   UCC_GETH_RX_STATISTICS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate DPRAM memory for"
				     " p_rx_fw_statistics_pram.", __func__);
			return -ENOMEM;
		}
		ugeth->p_rx_fw_statistics_pram =
		    (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
		    qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
		/* Zero out p_rx_fw_statistics_pram */
		memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
			  sizeof(struct ucc_geth_rx_firmware_statistics_pram));
	}

	/* intCoalescingPtr */

	/* Size varies with number of Rx queues */
	ugeth->rx_irq_coalescing_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
			   + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for"
			     " p_rx_irq_coalescing_tbl.", __func__);
		return -ENOMEM;
	}

	ugeth->p_rx_irq_coalescing_tbl =
	    (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
	    qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
		 ugeth->rx_irq_coalescing_tbl_offset);

	/* Fill interrupt coalescing table */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
			 interruptcoalescingmaxvalue,
			 ug_info->interruptcoalescingmaxvalue[i]);
		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
			 interruptcoalescingcounter,
			 ug_info->interruptcoalescingmaxvalue[i]);
	}

	/* MRBLR */
	init_max_rx_buff_len(uf_info->max_rx_buf_length,
			     &ugeth->p_rx_glbl_pram->mrblr);

	/* MFLR */
	out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);

	/* MINFLR */
	init_min_frame_len(ug_info->minFrameLength,
			   &ugeth->p_rx_glbl_pram->minflr,
			   &ugeth->p_rx_glbl_pram->mrblr);

	/* MAXD1 */
	out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);

	/* MAXD2 */
	out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);

	/* l2qt */
	l2qt = 0;
	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
		l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
	out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);

	/* l3qt */
	for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
		l3qt = 0;
		for (i = 0; i < 8; i++)
			l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
		out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
	}

	/* vlantype */
	out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);

	/* vlantci */
	out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);

	/* ecamptr */
	out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);

	/* RBDQPTR */
	/* Size varies with number of Rx queues */
	ugeth->rx_bd_qs_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   (sizeof(struct ucc_geth_rx_bd_queues_entry) +
			    sizeof(struct ucc_geth_rx_prefetched_bds)),
			   UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
			     __func__);
		return -ENOMEM;
	}

	ugeth->p_rx_bd_qs_tbl =
	    (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
							rx_bd_qs_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
	/* Zero out p_rx_bd_qs_tbl */
	memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
		  0,
		  ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
					  sizeof(struct ucc_geth_rx_prefetched_bds)));
  2497. /* Setup the table */
  2498. /* Assume BD rings are already established */
  2499. for (i = 0; i < ug_info->numQueuesRx; i++) {
  2500. if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
  2501. out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
  2502. (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
  2503. } else if (ugeth->ug_info->uf_info.bd_mem_part ==
  2504. MEM_PART_MURAM) {
  2505. out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
  2506. (u32) immrbar_virt_to_phys(ugeth->
  2507. p_rx_bd_ring[i]));
  2508. }
  2509. /* rest of fields handled by QE */
  2510. }
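	/*
	 * Assemble the Rx mode register image: feature enables, the VLAN
	 * operations, QoS mode and queue count are all OR'ed into remoder
	 * before the single write to the Rx global parameter RAM below.
	 */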
	/* remoder */
	/* Already has speed set */
	if (ugeth->rx_extended_features)
		remoder |= REMODER_RX_EXTENDED_FEATURES;
	if (ug_info->rxExtendedFiltering)
		remoder |= REMODER_RX_EXTENDED_FILTERING;
	if (ug_info->dynamicMaxFrameLength)
		remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
	if (ug_info->dynamicMinFrameLength)
		remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
	remoder |=
	    ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
	remoder |=
	    ug_info->vlanOperationNonTagged <<
	    REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
	remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
	remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
	if (ug_info->ipCheckSumCheck)
		remoder |= REMODER_IP_CHECKSUM_CHECK;
	if (ug_info->ipAddressAlignment)
		remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
	out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);

	/* Note that this function must be called */
	/* ONLY AFTER p_tx_fw_statistics_pram */
	/* and p_UccGethRxFirmwareStatisticsPram are allocated ! */
	init_firmware_statistics_gathering_mode((ug_info->statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
		(ug_info->statisticsMode &
		 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
		&ugeth->p_tx_glbl_pram->txrmonbaseptr,
		ugeth->tx_fw_statistics_pram_offset,
		&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
		ugeth->rx_fw_statistics_pram_offset,
		&ugeth->p_tx_glbl_pram->temoder,
		&ugeth->p_rx_glbl_pram->remoder);
	/* function code register */
	out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);

	/* initialize extended filtering */
	if (ug_info->rxExtendedFiltering) {
		if (!ug_info->extendedFilteringChainPointer) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Null Extended Filtering Chain Pointer.",
					  __func__);
			return -EINVAL;
		}

		/* Allocate memory for extended filtering Mode Global
		   Parameters */
		ugeth->exf_glbl_param_offset =
		    qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
		    UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate DPRAM memory for"
				     " p_exf_glbl_param.", __func__);
			return -ENOMEM;
		}

		ugeth->p_exf_glbl_param =
		    (struct ucc_geth_exf_global_pram __iomem *)
		    qe_muram_addr(ugeth->exf_glbl_param_offset);
		out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
			 ugeth->exf_glbl_param_offset);
		out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
			 (u32) ug_info->extendedFilteringChainPointer);
	} else {		/* initialize 82xx style address filtering */
		/* Init individual address recognition registers to disabled */
		for (j = 0; j < NUM_OF_PADDRS; j++)
			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);

		p_82xx_addr_filt =
		    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
		    ugeth->p_rx_glbl_pram->addressfiltering;

		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
							ENET_ADDR_TYPE_GROUP);
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
						ENET_ADDR_TYPE_INDIVIDUAL);
	}

	/*
	 * Initialize UCC at QE level
	 */
	command = QE_INIT_TX_RX;

	/* Allocate shadow InitEnet command parameter structure.
	 * This is needed because after the InitEnet command is executed,
	 * the structure in DPRAM is released, because DPRAM is a premium
	 * resource.
	 * This shadow structure keeps a copy of what was done so that the
	 * allocated resources can be released when the channel is freed.
	 */
	if (!(ugeth->p_init_enet_param_shadow =
	      kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate memory for"
			     " p_UccInitEnetParamShadows.", __func__);
		return -ENOMEM;
	}
	/* Zero out *p_init_enet_param_shadow */
	memset((char *)ugeth->p_init_enet_param_shadow,
	       0, sizeof(struct ucc_geth_init_pram));

	/* Fill shadow InitEnet command parameter structure */
	ugeth->p_init_enet_param_shadow->resinit1 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT1;
	ugeth->p_init_enet_param_shadow->resinit2 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT2;
	ugeth->p_init_enet_param_shadow->resinit3 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT3;
	ugeth->p_init_enet_param_shadow->resinit4 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT4;
	ugeth->p_init_enet_param_shadow->resinit5 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT5;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ugeth->rx_glbl_pram_offset | ug_info->riscRx;

	if ((ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
	    && (ug_info->largestexternallookupkeysize !=
		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
	    && (ug_info->largestexternallookupkeysize !=
		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Invalid largest External Lookup Key Size.",
				  __func__);
		return -EINVAL;
	}
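	/*
	 * Each Rx thread's parameter RAM entry grows when extended
	 * filtering is enabled, and grows again with the lookup key width;
	 * the size computed below is fed to fill_init_enet_entries().
	 */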
	ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
	    ug_info->largestexternallookupkeysize;
	size = sizeof(struct ucc_geth_thread_rx_pram);
	if (ug_info->rxExtendedFiltering) {
		size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
	}

	if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
		p_init_enet_param_shadow->rxthread[0]),
		(u8) (numThreadsRxNumerical + 1)
		/* Rx needs one extra for terminator */
		, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
		ug_info->riscRx, 1)) != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
				  __func__);
		return ret_val;
	}

	ugeth->p_init_enet_param_shadow->txglobal =
	    ugeth->tx_glbl_pram_offset | ug_info->riscTx;
	if ((ret_val =
	     fill_init_enet_entries(ugeth,
				    &(ugeth->p_init_enet_param_shadow->
				      txthread[0]), numThreadsTxNumerical,
				    sizeof(struct ucc_geth_thread_tx_pram),
				    UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
				    ug_info->riscTx, 0)) != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
				  __func__);
		return ret_val;
	}

	/* Load Rx bds with buffers */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Can not fill Rx bds with buffers.",
					  __func__);
			return ret_val;
		}
	}

	/* Allocate InitEnet command parameter structure */
	init_enet_pram_offset =
	    qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
	if (IS_ERR_VALUE(init_enet_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
			     __func__);
		return -ENOMEM;
	}
	p_init_enet_pram =
	    (struct ucc_geth_init_pram __iomem *)
	    qe_muram_addr(init_enet_pram_offset);

	/* Copy shadow InitEnet command parameter structure into PRAM */
	out_8(&p_init_enet_pram->resinit1,
	      ugeth->p_init_enet_param_shadow->resinit1);
	out_8(&p_init_enet_pram->resinit2,
	      ugeth->p_init_enet_param_shadow->resinit2);
	out_8(&p_init_enet_pram->resinit3,
	      ugeth->p_init_enet_param_shadow->resinit3);
	out_8(&p_init_enet_pram->resinit4,
	      ugeth->p_init_enet_param_shadow->resinit4);
	out_be16(&p_init_enet_pram->resinit5,
		 ugeth->p_init_enet_param_shadow->resinit5);
	out_8(&p_init_enet_pram->largestexternallookupkeysize,
	      ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
	out_be32(&p_init_enet_pram->rgftgfrxglobal,
		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
		out_be32(&p_init_enet_pram->rxthread[i],
			 ugeth->p_init_enet_param_shadow->rxthread[i]);
	out_be32(&p_init_enet_pram->txglobal,
		 ugeth->p_init_enet_param_shadow->txglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
		out_be32(&p_init_enet_pram->txthread[i],
			 ugeth->p_init_enet_param_shadow->txthread[i]);

	/* Issue QE command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);

	/* Free InitEnet command parameter */
	qe_muram_free(init_enet_pram_offset);
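	/*
	 * The QE consumes the InitEnet parameter block as part of the
	 * command above, so the DPRAM copy can be returned immediately;
	 * the kmalloc'ed shadow retains what is needed at teardown time.
	 */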
	return 0;
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	struct ucc_fast_private *uccf;
#endif
	u8 __iomem *bd;			/* BD pointer */
	u32 bd_status;
	u8 txQ = 0;

	ugeth_vdbg("%s: IN", __func__);

	spin_lock_irq(&ugeth->lock);

	dev->stats.tx_bytes += skb->len;

	/* Start from the next BD that should be filled */
	bd = ugeth->txBd[txQ];
	bd_status = in_be32((u32 __iomem *)bd);
	/* Save the skb pointer so we can free it later */
	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	ugeth->skb_curtx[txQ] =
	    (ugeth->skb_curtx[txQ] +
	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

	/* set up the buffer descriptor */
	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev, skb->data,
				skb->len, DMA_TO_DEVICE));

	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */

	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;

	/* set bd status and length */
	out_be32((u32 __iomem *)bd, bd_status);

	dev->trans_start = jiffies;

	/* Move to next BD in the ring */
	if (!(bd_status & T_W))
		bd += sizeof(struct qe_bd);
	else
		bd = ugeth->p_tx_bd_ring[txQ];

	/* If the next BD still needs to be cleaned up, then the bds
	   are full. We need to tell the kernel to stop sending us stuff. */
	if (bd == ugeth->confBd[txQ]) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	ugeth->txBd[txQ] = bd;

	if (ugeth->p_scheduler) {
		ugeth->cpucount[txQ]++;
		/* Indicate to QE that there are more Tx bds ready for
		   transmission */
		/* This is done by writing a running counter of the bd
		   count to the scheduler PRAM. */
		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
	}

#ifdef CONFIG_UGETH_TX_ON_DEMAND
	uccf = ugeth->uccf;
	out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
	spin_unlock_irq(&ugeth->lock);

	return 0;
}
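/* Harvest completed Rx BDs for one queue: hand good frames to the stack,
 * drop errored ones, and re-arm every processed BD with a fresh skb.
 * Returns the number of frames received, bounded by rx_work_limit. */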
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
{
	struct sk_buff *skb;
	u8 __iomem *bd;
	u16 length, howmany = 0;
	u32 bd_status;
	u8 *bdBuffer;
	struct net_device *dev;

	ugeth_vdbg("%s: IN", __func__);

	dev = ugeth->ndev;

	/* collect received buffers */
	bd = ugeth->rxBd[rxQ];

	bd_status = in_be32((u32 __iomem *)bd);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
		bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];

		/* determine whether buffer is first, last, first and last
		   (single buffer frame) or middle (not first and not last) */
		if (!skb ||
		    (!(bd_status & (R_F | R_L))) ||
		    (bd_status & R_ERRORS_FATAL)) {
			if (netif_msg_rx_err(ugeth))
				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
					  __func__, __LINE__, (u32) skb);
			if (skb)
				dev_kfree_skb_any(skb);

			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
			dev->stats.rx_dropped++;
		} else {
			dev->stats.rx_packets++;
			howmany++;

			/* Prep the skb for the packet */
			skb_put(skb, length);

			/* Tell the skb what kind of packet this is */
			skb->protocol = eth_type_trans(skb, ugeth->ndev);

			dev->stats.rx_bytes += length;
			/* Send the packet up the stack */
			netif_receive_skb(skb);
		}

		skb = get_new_skb(ugeth, bd);
		if (!skb) {
			if (netif_msg_rx_err(ugeth))
				ugeth_warn("%s: No Rx Data Buffer", __func__);
			dev->stats.rx_dropped++;
			break;
		}

		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;

		/* update to point at the next skb */
		ugeth->skb_currx[rxQ] =
		    (ugeth->skb_currx[rxQ] +
		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);

		if (bd_status & R_W)
			bd = ugeth->p_rx_bd_ring[rxQ];
		else
			bd += sizeof(struct qe_bd);

		bd_status = in_be32((u32 __iomem *)bd);
	}

	ugeth->rxBd[rxQ] = bd;
	return howmany;
}
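/* Tx completion for one queue: walk the BDs the hardware has released,
 * free the associated skbs, and wake the queue if it had been stopped
 * for lack of descriptors. */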
static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
	/* Start from the next BD that should be filled */
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	u8 __iomem *bd;			/* BD pointer */
	u32 bd_status;

	bd = ugeth->confBd[txQ];
	bd_status = in_be32((u32 __iomem *)bd);

	/* Normal processing. */
	while ((bd_status & T_R) == 0) {
		/* BD contains already transmitted buffer. */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame */

		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
			break;

		dev->stats.tx_packets++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb(ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
		ugeth->skb_dirtytx[txQ] =
		    (ugeth->skb_dirtytx[txQ] +
		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W))
			bd += sizeof(struct qe_bd);
		else
			bd = ugeth->p_tx_bd_ring[txQ];
		bd_status = in_be32((u32 __iomem *)bd);
	}
	ugeth->confBd[txQ] = bd;
	return 0;
}
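/* NAPI poll: drain the Rx queues up to the budget, run Tx completion
 * under the lock, and re-enable Rx/Tx event interrupts once the work
 * done falls below the budget. */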
static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
	struct ucc_geth_private *ugeth =
	    container_of(napi, struct ucc_geth_private, napi);
	struct ucc_geth_info *ug_info;
	int howmany, i;

	ug_info = ugeth->ug_info;

	howmany = 0;
	for (i = 0; i < ug_info->numQueuesRx; i++)
		howmany += ucc_geth_rx(ugeth, i, budget - howmany);

	/* Tx event processing */
	spin_lock(&ugeth->lock);
	for (i = 0; i < ug_info->numQueuesTx; i++)
		ucc_geth_tx(ugeth->ndev, i);
	spin_unlock(&ugeth->lock);

	if (howmany < budget) {
		napi_complete(napi);
		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
	}

	return howmany;
}
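/* Interrupt top half: acknowledge the pending UCC events, then mask the
 * Rx/Tx events and defer them to NAPI; error events are counted here. */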
static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
	struct net_device *dev = info;
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	register u32 ucce;
	register u32 uccm;

	ugeth_vdbg("%s: IN", __func__);

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

	/* read and clear events */
	ucce = (u32) in_be32(uccf->p_ucce);
	uccm = (u32) in_be32(uccf->p_uccm);
	ucce &= uccm;
	out_be32(uccf->p_ucce, ucce);

	/* check for receive events that require processing */
	if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
		if (napi_schedule_prep(&ugeth->napi)) {
			uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
			out_be32(uccf->p_uccm, uccm);
			__napi_schedule(&ugeth->napi);
		}
	}

	/* Errors and other events */
	if (ucce & UCCE_OTHER) {
		if (ucce & UCC_GETH_UCCE_BSY)
			dev->stats.rx_errors++;
		if (ucce & UCC_GETH_UCCE_TXE)
			dev->stats.tx_errors++;
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ucc_netpoll(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int irq = ugeth->ug_info->uf_info.irq;

	disable_irq(irq);
	ucc_geth_irq_handler(irq, dev);
	enable_irq(irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/*
	 * If device is not running, we will set mac addr register
	 * when opening the device.
	 */
	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&ugeth->lock);
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);
	spin_unlock_irq(&ugeth->lock);

	return 0;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __func__);

	/* Test station address */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Multicast address used for station address"
				  " - is this what you wanted?", __func__);
		return -EINVAL;
	}

	err = init_phy(dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot initialize PHY, aborting.",
				  dev->name);
		return err;
	}

	err = ucc_struct_init(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure internal struct, aborting.",
				  dev->name);
		goto out_err_stop;
	}

	napi_enable(&ugeth->napi);

	err = ucc_geth_startup(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto out_err;
	}

	err = adjust_enet_interface(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto out_err;
	}

	/* Set MACSTNADDR1, MACSTNADDR2 */
	/* For more details see the hardware spec. */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	phy_start(ugeth->phydev);

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot enable net device, aborting.",
				  dev->name);
		goto out_err;
	}

	err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
			  0, "UCC Geth", dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot get IRQ for net device, aborting.",
				  dev->name);
		goto out_err;
	}

	netif_start_queue(dev);

	return err;

out_err:
	napi_disable(&ugeth->napi);
out_err_stop:
	ucc_geth_stop(ugeth);
	return err;
}

/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __func__);

	napi_disable(&ugeth->napi);

	ucc_geth_stop(ugeth);

	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);

	netif_stop_queue(dev);

	return 0;
}

/* Reopen device. This will reset the MAC and PHY. */
static void ucc_geth_timeout_work(struct work_struct *work)
{
	struct ucc_geth_private *ugeth;
	struct net_device *dev;

	ugeth = container_of(work, struct ucc_geth_private, timeout_work);
	dev = ugeth->ndev;

	ugeth_vdbg("%s: IN", __func__);

	dev->stats.tx_errors++;

	ugeth_dump_regs(ugeth);

	if (dev->flags & IFF_UP) {
		/*
		 * Must reset MAC *and* PHY. This is done by reopening
		 * the device.
		 */
		ucc_geth_close(dev);
		ucc_geth_open(dev);
	}

	netif_tx_schedule_all(dev);
}

/*
 * ucc_geth_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 */
static void ucc_geth_timeout(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	netif_carrier_off(dev);
	schedule_work(&ugeth->timeout_work);
}

static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
	if (strcasecmp(phy_connection_type, "mii") == 0)
		return PHY_INTERFACE_MODE_MII;
	if (strcasecmp(phy_connection_type, "gmii") == 0)
		return PHY_INTERFACE_MODE_GMII;
	if (strcasecmp(phy_connection_type, "tbi") == 0)
		return PHY_INTERFACE_MODE_TBI;
	if (strcasecmp(phy_connection_type, "rmii") == 0)
		return PHY_INTERFACE_MODE_RMII;
	if (strcasecmp(phy_connection_type, "rgmii") == 0)
		return PHY_INTERFACE_MODE_RGMII;
	if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
		return PHY_INTERFACE_MODE_RGMII_ID;
	if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
		return PHY_INTERFACE_MODE_RGMII_TXID;
	if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
		return PHY_INTERFACE_MODE_RGMII_RXID;
	if (strcasecmp(phy_connection_type, "rtbi") == 0)
		return PHY_INTERFACE_MODE_RTBI;
	if (strcasecmp(phy_connection_type, "sgmii") == 0)
		return PHY_INTERFACE_MODE_SGMII;

	return PHY_INTERFACE_MODE_MII;
}
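/*
 * For illustration, a device-tree fragment that exercises the parser
 * above might look like the following (node name and value hypothetical):
 *
 *	ucc@2000 {
 *		phy-connection-type = "rgmii-id";
 *	};
 *
 * Any string not recognized above falls back to MII.
 */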
static const struct net_device_ops ucc_geth_netdev_ops = {
	.ndo_open		= ucc_geth_open,
	.ndo_stop		= ucc_geth_close,
	.ndo_start_xmit		= ucc_geth_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ucc_geth_set_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_multicast_list	= ucc_geth_set_multi,
	.ndo_tx_timeout		= ucc_geth_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ucc_netpoll,
#endif
};

static int ucc_geth_probe(struct of_device *ofdev,
			  const struct of_device_id *match)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	struct resource res;
	struct device_node *phy;
	int err, ucc_num, max_speed = 0;
	const u32 *fixed_link;
	const unsigned int *prop;
	const char *sprop;
	const void *mac_addr;
	const phandle *ph;
	phy_interface_t phy_interface;
	static const int enet_to_speed[] = {
		SPEED_10, SPEED_10, SPEED_10,
		SPEED_100, SPEED_100, SPEED_100,
		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
	};
	static const phy_interface_t enet_to_phy_interface[] = {
		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
		PHY_INTERFACE_MODE_SGMII,
	};

	ugeth_vdbg("%s: IN", __func__);

	prop = of_get_property(np, "cell-index", NULL);
	if (!prop) {
		prop = of_get_property(np, "device-id", NULL);
		if (!prop)
			return -ENODEV;
	}

	ucc_num = *prop - 1;
	if ((ucc_num < 0) || (ucc_num > 7))
		return -ENODEV;

	ug_info = &ugeth_info[ucc_num];
	if (ug_info == NULL) {
		if (netif_msg_probe(&debug))
			ugeth_err("%s: [%d] Missing additional data!",
				  __func__, ucc_num);
		return -ENODEV;
	}

	ug_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.rx_clock > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "rx-clock", NULL);
		if (!prop) {
			/* If both rx-clock-name and rx-clock are missing,
			   we want to tell people to use rx-clock-name. */
			printk(KERN_ERR
			       "ucc_geth: missing rx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid rx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.rx_clock = *prop;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.tx_clock > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "tx-clock", NULL);
		if (!prop) {
			printk(KERN_ERR
			       "ucc_geth: missing tx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid tx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.tx_clock = *prop;
	}
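	/*
	 * For illustration (values hypothetical), a device tree would
	 * typically carry something like:
	 *
	 *	rx-clock-name = "none";
	 *	tx-clock-name = "clk9";
	 *
	 * The numeric rx-clock/tx-clock properties are still accepted for
	 * old trees, but the error messages steer users to the *-name form.
	 */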
	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	ug_info->uf_info.regs = res.start;
	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
	fixed_link = of_get_property(np, "fixed-link", NULL);
	if (fixed_link) {
		phy = NULL;
	} else {
		phy = of_parse_phandle(np, "phy-handle", 0);
		if (phy == NULL)
			return -ENODEV;
	}
	ug_info->phy_node = phy;

	/* get the phy interface type, or default to MII */
	prop = of_get_property(np, "phy-connection-type", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(phy, "interface", NULL);
		if (prop != NULL) {
			phy_interface = enet_to_phy_interface[*prop];
			max_speed = enet_to_speed[*prop];
		} else
			phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		phy_interface = to_phy_interface((const char *)prop);
	}

	/* get speed, or derive from PHY interface */
	if (max_speed == 0)
		switch (phy_interface) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
		case PHY_INTERFACE_MODE_SGMII:
			max_speed = SPEED_1000;
			break;
		default:
			max_speed = SPEED_100;
			break;
		}

	if (max_speed == SPEED_1000) {
		/* configure muram FIFOs for gigabit operation */
		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
		ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
	}
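	/*
	 * Gigabit operation uses deeper FIFOs and four Rx/Tx threads; the
	 * *_GIGA_INIT values above override the slower-speed defaults that
	 * ugeth_primary_info provides via ugeth_info[].
	 */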
	if (netif_msg_probe(&debug))
		printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
		       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
		       ug_info->uf_info.irq);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));
	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	/* Create CQs for hash tables */
	INIT_LIST_HEAD(&ugeth->group_hash_q);
	INIT_LIST_HEAD(&ugeth->ind_hash_q);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	uec_set_ethtool_ops(dev);
	dev->netdev_ops = &ucc_geth_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
	dev->mtu = 1500;

	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;

	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Cannot register net device, aborting.",
				  dev->name);
		free_netdev(dev);
		return err;
	}

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	ugeth->ug_info = ug_info;
	ugeth->dev = device;
	ugeth->ndev = dev;
	ugeth->node = np;

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	ph = of_get_property(np, "tbi-handle", NULL);
	if (ph) {
		struct device_node *tbi = of_find_node_by_phandle(*ph);
		struct device_node *mdio;
		struct of_device *ofdev;
		struct mii_bus *bus;
		const unsigned int *id;

		if (!tbi)
			return 0;

		mdio = of_get_parent(tbi);
		if (!mdio)
			return 0;

		ofdev = of_find_device_by_node(mdio);

		of_node_put(mdio);

		id = of_get_property(tbi, "reg", NULL);
		if (!id)
			return 0;
		of_node_put(tbi);

		bus = dev_get_drvdata(&ofdev->dev);
		if (!bus)
			return 0;

		ugeth->tbiphy = bus->phy_map[*id];
	}

	return 0;
}

static int ucc_geth_remove(struct of_device *ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	unregister_netdev(dev);
	free_netdev(dev);
	ucc_geth_memclean(ugeth);
	dev_set_drvdata(device, NULL);

	return 0;
}

static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);

static struct of_platform_driver ucc_geth_driver = {
	.name		= DRV_NAME,
	.match_table	= ucc_geth_match,
	.probe		= ucc_geth_probe,
	.remove		= ucc_geth_remove,
};

static int __init ucc_geth_init(void)
{
	int i, ret;

	if (netif_msg_drv(&debug))
		printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	ret = of_register_platform_driver(&ucc_geth_driver);

	return ret;
}

static void __exit ucc_geth_exit(void)
{
	of_unregister_platform_driver(&ucc_geth_driver);
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");