ucc_geth.c

/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "fsl_pq_mdio.h"

#undef DEBUG

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT	(NETIF_MSG_IFUP << 1 ) - 1

static DEFINE_SPINLOCK(ugeth_lock);

static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN},
	.bdRingLenRx = {
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
static struct ucc_geth_info ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

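/* Atomically remove and return the first node of a list, or NULL if the
 * list is empty.  The global ugeth_lock spinlock serializes the list
 * manipulation. */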
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}

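/* Take an skb from the recycle queue (or allocate a fresh one), align its
 * data buffer, map it for DMA and attach it to the given Rx buffer
 * descriptor.  Returns the skb, or NULL on allocation failure. */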
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&ugeth->rx_recycle);
	if (!skb)
		skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
				    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->ndev;

	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));

	return skb;
}

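/* Attach a new skb to every buffer descriptor of Rx queue rxQ.  The loop
 * ends after the BD whose wrap (R_W) bit marks the end of the ring.
 * Returns -ENOMEM if an skb cannot be obtained. */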
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}

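/* Build the Tx/Rx thread entries for the init enet command: for each
 * entry, reserve a SNUM from the QE, allocate MURAM for the thread
 * parameter RAM (skipped for the first Rx entry when
 * skip_page_for_first_entry is set), and encode SNUM, MURAM offset and
 * RISC allocation into the entry word. */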
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}

static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    unsigned int risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*p_start++ = 0;
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}

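/* Write a 6-byte MAC address into three consecutive 16-bit registers,
 * byte-reversed as the hardware expects (mac[5] lands in the high byte of
 * the first register). */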
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}

static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

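/* Add a MAC address to the hardware group hash table: write the
 * byte-reversed address into the temporary address register of the 82xx
 * address filtering parameter RAM and issue the QE "set group address"
 * command for this UCC. */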
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode; therefore, to
	   insert the address into the hash (Big Endian mode), we reverse
	   the bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}

#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics &&
	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));
	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
			   ugeth->p_tx_glbl_pram->iphoffset[0]);
		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
			   ugeth->p_tx_glbl_pram->iphoffset[1]);
		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
			   ugeth->p_tx_glbl_pram->iphoffset[2]);
		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
			   ugeth->p_tx_glbl_pram->iphoffset[3]);
		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
			   ugeth->p_tx_glbl_pram->iphoffset[4]);
		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
			   ugeth->p_tx_glbl_pram->iphoffset[5]);
		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
			   ugeth->p_tx_glbl_pram->iphoffset[6]);
		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
			   ugeth->p_tx_glbl_pram->iphoffset[7]);
		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(struct ucc_geth_send_queue_qd));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_irq_coalescing_tbl->
				   coalescingentry[i]);
			ugeth_info
			    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
			    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32)
				   qe_muram_addr(in_be32
						 (&ugeth->p_rx_bd_qs_tbl[i].
						  bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */

static void init_default_reg_vals(u32 __iomem *upsmr_register,
				  u32 __iomem *maccfg1_register,
				  u32 __iomem *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}

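/* Program the HAFDUP register from the half-duplex parameters (alternate
 * BEB, back pressure, excessive defer, retransmission limit, collision
 * window).  Returns -EINVAL if a value exceeds its field maximum. */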
static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   u32 __iomem *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}

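/* Program the IPGIFG register from the four inter-frame gap parameters,
 * checking each against its maximum field value. */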
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       u32 __iomem *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}

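/* Configure pause-frame flow control: the pause period and extension field
 * go into UEMPR, the automatic flow-control mode into UPSMR, and the Rx/Tx
 * flow-control enables into MACCFG1. */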
int init_flow_control_params(u32 automatic_flow_control_mode,
			     int rx_flow_control_enable,
			     int tx_flow_control_enable,
			     u16 pause_period,
			     u16 extension_field,
			     u32 __iomem *upsmr_register,
			     u32 __iomem *uempr_register,
			     u32 __iomem *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	setbits32(upsmr_register, automatic_flow_control_mode);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}

  1065. static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
  1066. int auto_zero_hardware_statistics,
  1067. u32 __iomem *upsmr_register,
  1068. u16 __iomem *uescr_register)
  1069. {
  1070. u16 uescr_value = 0;
  1071. /* Enable hardware statistics gathering if requested */
  1072. if (enable_hardware_statistics)
  1073. setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
  1074. /* Clear hardware statistics counters */
  1075. uescr_value = in_be16(uescr_register);
  1076. uescr_value |= UESCR_CLRCNT;
  1077. /* Automatically zero hardware statistics counters on read,
  1078. if requested */
  1079. if (auto_zero_hardware_statistics)
  1080. uescr_value |= UESCR_AUTOZ;
  1081. out_be16(uescr_register, uescr_value);
  1082. return 0;
  1083. }
  1084. static int init_firmware_statistics_gathering_mode(int
  1085. enable_tx_firmware_statistics,
  1086. int enable_rx_firmware_statistics,
  1087. u32 __iomem *tx_rmon_base_ptr,
  1088. u32 tx_firmware_statistics_structure_address,
  1089. u32 __iomem *rx_rmon_base_ptr,
  1090. u32 rx_firmware_statistics_structure_address,
  1091. u16 __iomem *temoder_register,
  1092. u32 __iomem *remoder_register)
  1093. {
  1094. /* Note: this function does not check if */
  1095. /* the parameters it receives are NULL */
  1096. if (enable_tx_firmware_statistics) {
  1097. out_be32(tx_rmon_base_ptr,
  1098. tx_firmware_statistics_structure_address);
  1099. setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
  1100. }
  1101. if (enable_rx_firmware_statistics) {
  1102. out_be32(rx_rmon_base_ptr,
  1103. rx_firmware_statistics_structure_address);
  1104. setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
  1105. }
  1106. return 0;
  1107. }
static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      u32 __iomem *macstnaddr1_register,
				      u32 __iomem *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 5     station address byte 4     */
	/* 16                     23  24                     31  */
	/* station address byte 3     station address byte 2     */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 1     station address byte 0     */
	/* 16                     23  24                     31  */
	/* reserved                   reserved                   */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}

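/*
 * Worked example for init_mac_station_addr_regs() above (documentation
 * only): continuing the 0x12345678ABCD station address used in the comments,
 *
 *	MACSTNADDR1 = 0xCD << 24 | 0xAB << 16 | 0x78 << 8 | 0x56 = 0xCDAB7856
 *	MACSTNADDR2 = 0x34 << 24 | 0x12 << 16                    = 0x34120000
 *
 * i.e. the address bytes are written in reverse order across the two
 * registers, and the low half of MACSTNADDR2 stays zero (reserved).
 */
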
static int init_check_frame_length_mode(int length_check,
					u32 __iomem *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_preamble_length(u8 preamble_length,
				u32 __iomem *maccfg2_register)
{
	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
			preamble_length << MACCFG2_PREL_SHIFT);

	return 0;
}

static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, u32 __iomem *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UCC_GETH_UPSMR_BRO;
	else
		value &= ~UCC_GETH_UPSMR_BRO;

	if (receive_short_frames)
		value |= UCC_GETH_UPSMR_RSH;
	else
		value &= ~UCC_GETH_UPSMR_RSH;

	if (promiscuous)
		value |= UCC_GETH_UPSMR_PRO;
	else
		value &= ~UCC_GETH_UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}

static int init_max_rx_buff_len(u16 max_rx_buf_len,
				u16 __iomem *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0)
	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}

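/*
 * Illustrative note on init_max_rx_buff_len() above (documentation only):
 * UCC_GETH_MRBLR_ALIGNMENT is the 128-byte granularity referred to in the
 * comment, so a maximum Rx buffer length of 1536 is accepted while 1500
 * would be rejected with -EINVAL (1500 % 128 != 0).
 */
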
static int init_min_frame_len(u16 min_frame_length,
			      u16 __iomem *minflr_register,
			      u16 __iomem *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}

  1204. static int adjust_enet_interface(struct ucc_geth_private *ugeth)
  1205. {
  1206. struct ucc_geth_info *ug_info;
  1207. struct ucc_geth __iomem *ug_regs;
  1208. struct ucc_fast __iomem *uf_regs;
  1209. int ret_val;
  1210. u32 upsmr, maccfg2, tbiBaseAddress;
  1211. u16 value;
  1212. ugeth_vdbg("%s: IN", __func__);
  1213. ug_info = ugeth->ug_info;
  1214. ug_regs = ugeth->ug_regs;
  1215. uf_regs = ugeth->uccf->uf_regs;
  1216. /* Set MACCFG2 */
  1217. maccfg2 = in_be32(&ug_regs->maccfg2);
  1218. maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
  1219. if ((ugeth->max_speed == SPEED_10) ||
  1220. (ugeth->max_speed == SPEED_100))
  1221. maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
  1222. else if (ugeth->max_speed == SPEED_1000)
  1223. maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
  1224. maccfg2 |= ug_info->padAndCrc;
  1225. out_be32(&ug_regs->maccfg2, maccfg2);
  1226. /* Set UPSMR */
  1227. upsmr = in_be32(&uf_regs->upsmr);
  1228. upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
  1229. UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
  1230. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
  1231. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
  1232. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
  1233. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
  1234. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
  1235. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1236. if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
  1237. upsmr |= UCC_GETH_UPSMR_RPM;
  1238. switch (ugeth->max_speed) {
  1239. case SPEED_10:
  1240. upsmr |= UCC_GETH_UPSMR_R10M;
  1241. /* FALLTHROUGH */
  1242. case SPEED_100:
  1243. if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
  1244. upsmr |= UCC_GETH_UPSMR_RMM;
  1245. }
  1246. }
  1247. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
  1248. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1249. upsmr |= UCC_GETH_UPSMR_TBIM;
  1250. }
  1251. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
  1252. upsmr |= UCC_GETH_UPSMR_SGMM;
  1253. out_be32(&uf_regs->upsmr, upsmr);
  1254. /* Disable autonegotiation in tbi mode, because by default it
  1255. comes up in autonegotiation mode. */
  1256. /* Note that this depends on proper setting in utbipar register. */
  1257. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
  1258. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1259. tbiBaseAddress = in_be32(&ug_regs->utbipar);
  1260. tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
  1261. tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
  1262. value = ugeth->phydev->bus->read(ugeth->phydev->bus,
  1263. (u8) tbiBaseAddress, ENET_TBI_MII_CR);
  1264. value &= ~0x1000; /* Turn off autonegotiation */
  1265. ugeth->phydev->bus->write(ugeth->phydev->bus,
  1266. (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
  1267. }
  1268. init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
  1269. ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
  1270. if (ret_val != 0) {
  1271. if (netif_msg_probe(ugeth))
  1272. ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
  1273. __func__);
  1274. return ret_val;
  1275. }
  1276. return 0;
  1277. }
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u32 temp;
	int i = 10;

	uccf = ugeth->uccf;

	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
	out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA);  /* clear by writing 1 */

	/* Issue host command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		msleep(10);
		temp = in_be32(uccf->p_ucce);
	} while (!(temp & UCC_GETH_UCCE_GRA) && --i);

	uccf->stopped_tx = 1;

	return 0;
}

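/*
 * Note (documentation only): the completion polls in the graceful-stop
 * helpers above and below retry at most ten times with msleep(10) between
 * reads, i.e. roughly 100 ms, after which the channel is marked stopped
 * and 0 is returned regardless of whether the QE acknowledged the command.
 */
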
static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u8 temp;
	int i = 10;

	uccf = ugeth->uccf;

	/* Clear acknowledge bit */
	temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);

	/* Keep issuing command and checking acknowledge bit until
	it is asserted, according to spec */
	do {
		/* Issue host command */
		cecr_subblock =
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);
		msleep(10);
		temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);

	uccf->stopped_rx = 1;

	return 0;
}

static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
	uccf->stopped_tx = 0;

	return 0;
}

static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     0);
	uccf->stopped_rx = 0;

	return 0;
}

static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;
	int enabled_tx, enabled_rx;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __func__);
		return -EINVAL;
	}

	enabled_tx = uccf->enabled_tx;
	enabled_rx = uccf->enabled_rx;

	/* Get Tx and Rx going again, in case this channel was actively
	disabled. */
	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
		ugeth_restart_tx(ugeth);
	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
		ugeth_restart_rx(ugeth);

	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */

	return 0;
}

static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __func__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
		ugeth_graceful_stop_tx(ugeth);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
		ugeth_graceful_stop_rx(ugeth);

	ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */

	return 0;
}

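/*
 * Usage sketch for ugeth_disable()/ugeth_enable() (documentation only):
 * callers that rewrite parameter RAM while the channel may be running
 * quiesce it around the update, as ugeth_82xx_filtering_clear_all_addr_in_hash()
 * does further down in this file:
 *
 *	comm_dir = 0;
 *	if (uccf->enabled_tx)
 *		comm_dir |= COMM_DIR_TX;
 *	if (uccf->enabled_rx)
 *		comm_dir |= COMM_DIR_RX;
 *	if (comm_dir)
 *		ugeth_disable(ugeth, comm_dir);
 *	... update parameter RAM ...
 *	if (comm_dir)
 *		ugeth_enable(ugeth, comm_dir);
 */
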
  1391. /* Called every time the controller might need to be made
  1392. * aware of new link state. The PHY code conveys this
  1393. * information through variables in the ugeth structure, and this
  1394. * function converts those variables into the appropriate
  1395. * register values, and can bring down the device if needed.
  1396. */
  1397. static void adjust_link(struct net_device *dev)
  1398. {
  1399. struct ucc_geth_private *ugeth = netdev_priv(dev);
  1400. struct ucc_geth __iomem *ug_regs;
  1401. struct ucc_fast __iomem *uf_regs;
  1402. struct phy_device *phydev = ugeth->phydev;
  1403. unsigned long flags;
  1404. int new_state = 0;
  1405. ug_regs = ugeth->ug_regs;
  1406. uf_regs = ugeth->uccf->uf_regs;
  1407. spin_lock_irqsave(&ugeth->lock, flags);
  1408. if (phydev->link) {
  1409. u32 tempval = in_be32(&ug_regs->maccfg2);
  1410. u32 upsmr = in_be32(&uf_regs->upsmr);
  1411. /* Now we make sure that we can be in full duplex mode.
  1412. * If not, we operate in half-duplex mode. */
  1413. if (phydev->duplex != ugeth->oldduplex) {
  1414. new_state = 1;
  1415. if (!(phydev->duplex))
  1416. tempval &= ~(MACCFG2_FDX);
  1417. else
  1418. tempval |= MACCFG2_FDX;
  1419. ugeth->oldduplex = phydev->duplex;
  1420. }
  1421. if (phydev->speed != ugeth->oldspeed) {
  1422. new_state = 1;
  1423. switch (phydev->speed) {
  1424. case SPEED_1000:
  1425. tempval = ((tempval &
  1426. ~(MACCFG2_INTERFACE_MODE_MASK)) |
  1427. MACCFG2_INTERFACE_MODE_BYTE);
  1428. break;
  1429. case SPEED_100:
  1430. case SPEED_10:
  1431. tempval = ((tempval &
  1432. ~(MACCFG2_INTERFACE_MODE_MASK)) |
  1433. MACCFG2_INTERFACE_MODE_NIBBLE);
  1434. /* if reduced mode, re-set UPSMR.R10M */
  1435. if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
  1436. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
  1437. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
  1438. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
  1439. (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
  1440. (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
  1441. if (phydev->speed == SPEED_10)
  1442. upsmr |= UCC_GETH_UPSMR_R10M;
  1443. else
  1444. upsmr &= ~UCC_GETH_UPSMR_R10M;
  1445. }
  1446. break;
  1447. default:
  1448. if (netif_msg_link(ugeth))
  1449. ugeth_warn(
  1450. "%s: Ack! Speed (%d) is not 10/100/1000!",
  1451. dev->name, phydev->speed);
  1452. break;
  1453. }
  1454. ugeth->oldspeed = phydev->speed;
  1455. }
  1456. out_be32(&ug_regs->maccfg2, tempval);
  1457. out_be32(&uf_regs->upsmr, upsmr);
  1458. if (!ugeth->oldlink) {
  1459. new_state = 1;
  1460. ugeth->oldlink = 1;
  1461. }
  1462. } else if (ugeth->oldlink) {
  1463. new_state = 1;
  1464. ugeth->oldlink = 0;
  1465. ugeth->oldspeed = 0;
  1466. ugeth->oldduplex = -1;
  1467. }
  1468. if (new_state && netif_msg_link(ugeth))
  1469. phy_print_status(phydev);
  1470. spin_unlock_irqrestore(&ugeth->lock, flags);
  1471. }
  1472. /* Initialize TBI PHY interface for communicating with the
  1473. * SERDES lynx PHY on the chip. We communicate with this PHY
  1474. * through the MDIO bus on each controller, treating it as a
  1475. * "normal" PHY at the address found in the UTBIPA register. We assume
  1476. * that the UTBIPA register is valid. Either the MDIO bus code will set
  1477. * it to a value that doesn't conflict with other PHYs on the bus, or the
  1478. * value doesn't matter, as there are no other PHYs on the bus.
  1479. */
  1480. static void uec_configure_serdes(struct net_device *dev)
  1481. {
  1482. struct ucc_geth_private *ugeth = netdev_priv(dev);
  1483. struct ucc_geth_info *ug_info = ugeth->ug_info;
  1484. struct phy_device *tbiphy;
  1485. if (!ug_info->tbi_node) {
  1486. dev_warn(&dev->dev, "SGMII mode requires that the device "
  1487. "tree specify a tbi-handle\n");
  1488. return;
  1489. }
  1490. tbiphy = of_phy_find_device(ug_info->tbi_node);
  1491. if (!tbiphy) {
  1492. dev_err(&dev->dev, "error: Could not get TBI device\n");
  1493. return;
  1494. }
  1495. /*
  1496. * If the link is already up, we must already be ok, and don't need to
  1497. * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
  1498. * everything for us? Resetting it takes the link down and requires
  1499. * several seconds for it to come back.
  1500. */
  1501. if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
  1502. return;
  1503. /* Single clk mode, mii mode off(for serdes communication) */
  1504. phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
  1505. phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
  1506. phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
  1507. }
  1508. /* Configure the PHY for dev.
  1509. * returns 0 if success. -1 if failure
  1510. */
  1511. static int init_phy(struct net_device *dev)
  1512. {
  1513. struct ucc_geth_private *priv = netdev_priv(dev);
  1514. struct ucc_geth_info *ug_info = priv->ug_info;
  1515. struct phy_device *phydev;
  1516. priv->oldlink = 0;
  1517. priv->oldspeed = 0;
  1518. priv->oldduplex = -1;
  1519. phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
  1520. priv->phy_interface);
  1521. if (!phydev)
  1522. phydev = of_phy_connect_fixed_link(dev, &adjust_link,
  1523. priv->phy_interface);
  1524. if (!phydev) {
  1525. dev_err(&dev->dev, "Could not attach to PHY\n");
  1526. return -ENODEV;
  1527. }
  1528. if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
  1529. uec_configure_serdes(dev);
  1530. phydev->supported &= (ADVERTISED_10baseT_Half |
  1531. ADVERTISED_10baseT_Full |
  1532. ADVERTISED_100baseT_Half |
  1533. ADVERTISED_100baseT_Full);
  1534. if (priv->max_speed == SPEED_1000)
  1535. phydev->supported |= ADVERTISED_1000baseT_Full;
  1536. phydev->advertising = phydev->supported;
  1537. priv->phydev = phydev;
  1538. return 0;
  1539. }
  1540. static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
  1541. {
  1542. #ifdef DEBUG
  1543. ucc_fast_dump_regs(ugeth->uccf);
  1544. dump_regs(ugeth);
  1545. dump_bds(ugeth);
  1546. #endif
  1547. }
  1548. static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
  1549. ugeth,
  1550. enum enet_addr_type
  1551. enet_addr_type)
  1552. {
  1553. struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
  1554. struct ucc_fast_private *uccf;
  1555. enum comm_dir comm_dir;
  1556. struct list_head *p_lh;
  1557. u16 i, num;
  1558. u32 __iomem *addr_h;
  1559. u32 __iomem *addr_l;
  1560. u8 *p_counter;
  1561. uccf = ugeth->uccf;
  1562. p_82xx_addr_filt =
  1563. (struct ucc_geth_82xx_address_filtering_pram __iomem *)
  1564. ugeth->p_rx_glbl_pram->addressfiltering;
  1565. if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
  1566. addr_h = &(p_82xx_addr_filt->gaddr_h);
  1567. addr_l = &(p_82xx_addr_filt->gaddr_l);
  1568. p_lh = &ugeth->group_hash_q;
  1569. p_counter = &(ugeth->numGroupAddrInHash);
  1570. } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
  1571. addr_h = &(p_82xx_addr_filt->iaddr_h);
  1572. addr_l = &(p_82xx_addr_filt->iaddr_l);
  1573. p_lh = &ugeth->ind_hash_q;
  1574. p_counter = &(ugeth->numIndAddrInHash);
  1575. } else
  1576. return -EINVAL;
  1577. comm_dir = 0;
  1578. if (uccf->enabled_tx)
  1579. comm_dir |= COMM_DIR_TX;
  1580. if (uccf->enabled_rx)
  1581. comm_dir |= COMM_DIR_RX;
  1582. if (comm_dir)
  1583. ugeth_disable(ugeth, comm_dir);
  1584. /* Clear the hash table. */
  1585. out_be32(addr_h, 0x00000000);
  1586. out_be32(addr_l, 0x00000000);
  1587. if (!p_lh)
  1588. return 0;
  1589. num = *p_counter;
  1590. /* Delete all remaining CQ elements */
  1591. for (i = 0; i < num; i++)
  1592. put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
  1593. *p_counter = 0;
  1594. if (comm_dir)
  1595. ugeth_enable(ugeth, comm_dir);
  1596. return 0;
  1597. }
  1598. static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
  1599. u8 paddr_num)
  1600. {
  1601. ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
  1602. return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
  1603. }
  1604. static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
  1605. {
  1606. u16 i, j;
  1607. u8 __iomem *bd;
  1608. if (!ugeth)
  1609. return;
  1610. if (ugeth->uccf) {
  1611. ucc_fast_free(ugeth->uccf);
  1612. ugeth->uccf = NULL;
  1613. }
  1614. if (ugeth->p_thread_data_tx) {
  1615. qe_muram_free(ugeth->thread_dat_tx_offset);
  1616. ugeth->p_thread_data_tx = NULL;
  1617. }
  1618. if (ugeth->p_thread_data_rx) {
  1619. qe_muram_free(ugeth->thread_dat_rx_offset);
  1620. ugeth->p_thread_data_rx = NULL;
  1621. }
  1622. if (ugeth->p_exf_glbl_param) {
  1623. qe_muram_free(ugeth->exf_glbl_param_offset);
  1624. ugeth->p_exf_glbl_param = NULL;
  1625. }
  1626. if (ugeth->p_rx_glbl_pram) {
  1627. qe_muram_free(ugeth->rx_glbl_pram_offset);
  1628. ugeth->p_rx_glbl_pram = NULL;
  1629. }
  1630. if (ugeth->p_tx_glbl_pram) {
  1631. qe_muram_free(ugeth->tx_glbl_pram_offset);
  1632. ugeth->p_tx_glbl_pram = NULL;
  1633. }
  1634. if (ugeth->p_send_q_mem_reg) {
  1635. qe_muram_free(ugeth->send_q_mem_reg_offset);
  1636. ugeth->p_send_q_mem_reg = NULL;
  1637. }
  1638. if (ugeth->p_scheduler) {
  1639. qe_muram_free(ugeth->scheduler_offset);
  1640. ugeth->p_scheduler = NULL;
  1641. }
  1642. if (ugeth->p_tx_fw_statistics_pram) {
  1643. qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
  1644. ugeth->p_tx_fw_statistics_pram = NULL;
  1645. }
  1646. if (ugeth->p_rx_fw_statistics_pram) {
  1647. qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
  1648. ugeth->p_rx_fw_statistics_pram = NULL;
  1649. }
  1650. if (ugeth->p_rx_irq_coalescing_tbl) {
  1651. qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
  1652. ugeth->p_rx_irq_coalescing_tbl = NULL;
  1653. }
  1654. if (ugeth->p_rx_bd_qs_tbl) {
  1655. qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
  1656. ugeth->p_rx_bd_qs_tbl = NULL;
  1657. }
  1658. if (ugeth->p_init_enet_param_shadow) {
  1659. return_init_enet_entries(ugeth,
  1660. &(ugeth->p_init_enet_param_shadow->
  1661. rxthread[0]),
  1662. ENET_INIT_PARAM_MAX_ENTRIES_RX,
  1663. ugeth->ug_info->riscRx, 1);
  1664. return_init_enet_entries(ugeth,
  1665. &(ugeth->p_init_enet_param_shadow->
  1666. txthread[0]),
  1667. ENET_INIT_PARAM_MAX_ENTRIES_TX,
  1668. ugeth->ug_info->riscTx, 0);
  1669. kfree(ugeth->p_init_enet_param_shadow);
  1670. ugeth->p_init_enet_param_shadow = NULL;
  1671. }
  1672. for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
  1673. bd = ugeth->p_tx_bd_ring[i];
  1674. if (!bd)
  1675. continue;
  1676. for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
  1677. if (ugeth->tx_skbuff[i][j]) {
  1678. dma_unmap_single(ugeth->dev,
  1679. in_be32(&((struct qe_bd __iomem *)bd)->buf),
  1680. (in_be32((u32 __iomem *)bd) &
  1681. BD_LENGTH_MASK),
  1682. DMA_TO_DEVICE);
  1683. dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
  1684. ugeth->tx_skbuff[i][j] = NULL;
  1685. }
  1686. }
  1687. kfree(ugeth->tx_skbuff[i]);
  1688. if (ugeth->p_tx_bd_ring[i]) {
  1689. if (ugeth->ug_info->uf_info.bd_mem_part ==
  1690. MEM_PART_SYSTEM)
  1691. kfree((void *)ugeth->tx_bd_ring_offset[i]);
  1692. else if (ugeth->ug_info->uf_info.bd_mem_part ==
  1693. MEM_PART_MURAM)
  1694. qe_muram_free(ugeth->tx_bd_ring_offset[i]);
  1695. ugeth->p_tx_bd_ring[i] = NULL;
  1696. }
  1697. }
  1698. for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
  1699. if (ugeth->p_rx_bd_ring[i]) {
  1700. /* Return existing data buffers in ring */
  1701. bd = ugeth->p_rx_bd_ring[i];
  1702. for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
  1703. if (ugeth->rx_skbuff[i][j]) {
  1704. dma_unmap_single(ugeth->dev,
  1705. in_be32(&((struct qe_bd __iomem *)bd)->buf),
  1706. ugeth->ug_info->
  1707. uf_info.max_rx_buf_length +
  1708. UCC_GETH_RX_DATA_BUF_ALIGNMENT,
  1709. DMA_FROM_DEVICE);
  1710. dev_kfree_skb_any(
  1711. ugeth->rx_skbuff[i][j]);
  1712. ugeth->rx_skbuff[i][j] = NULL;
  1713. }
  1714. bd += sizeof(struct qe_bd);
  1715. }
  1716. kfree(ugeth->rx_skbuff[i]);
  1717. if (ugeth->ug_info->uf_info.bd_mem_part ==
  1718. MEM_PART_SYSTEM)
  1719. kfree((void *)ugeth->rx_bd_ring_offset[i]);
  1720. else if (ugeth->ug_info->uf_info.bd_mem_part ==
  1721. MEM_PART_MURAM)
  1722. qe_muram_free(ugeth->rx_bd_ring_offset[i]);
  1723. ugeth->p_rx_bd_ring[i] = NULL;
  1724. }
  1725. }
  1726. while (!list_empty(&ugeth->group_hash_q))
  1727. put_enet_addr_container(ENET_ADDR_CONT_ENTRY
  1728. (dequeue(&ugeth->group_hash_q)));
  1729. while (!list_empty(&ugeth->ind_hash_q))
  1730. put_enet_addr_container(ENET_ADDR_CONT_ENTRY
  1731. (dequeue(&ugeth->ind_hash_q)));
  1732. if (ugeth->ug_regs) {
  1733. iounmap(ugeth->ug_regs);
  1734. ugeth->ug_regs = NULL;
  1735. }
  1736. skb_queue_purge(&ugeth->rx_recycle);
  1737. }
  1738. static void ucc_geth_set_multi(struct net_device *dev)
  1739. {
  1740. struct ucc_geth_private *ugeth;
  1741. struct dev_mc_list *dmi;
  1742. struct ucc_fast __iomem *uf_regs;
  1743. struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
  1744. int i;
  1745. ugeth = netdev_priv(dev);
  1746. uf_regs = ugeth->uccf->uf_regs;
  1747. if (dev->flags & IFF_PROMISC) {
  1748. setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
  1749. } else {
  1750. clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
  1751. p_82xx_addr_filt =
  1752. (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
  1753. p_rx_glbl_pram->addressfiltering;
  1754. if (dev->flags & IFF_ALLMULTI) {
  1755. /* Catch all multicast addresses, so set the
  1756. * filter to all 1's.
  1757. */
  1758. out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
  1759. out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
  1760. } else {
  1761. /* Clear filter and add the addresses in the list.
  1762. */
  1763. out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
  1764. out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
  1765. dmi = dev->mc_list;
  1766. for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
  1767. /* Only support group multicast for now.
  1768. */
  1769. if (!(dmi->dmi_addr[0] & 1))
  1770. continue;
  1771. /* Ask CPM to run CRC and set bit in
  1772. * filter mask.
  1773. */
  1774. hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
  1775. }
  1776. }
  1777. }
  1778. }
  1779. static void ucc_geth_stop(struct ucc_geth_private *ugeth)
  1780. {
  1781. struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
  1782. struct phy_device *phydev = ugeth->phydev;
  1783. ugeth_vdbg("%s: IN", __func__);
  1784. /* Disable the controller */
  1785. ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
  1786. /* Tell the kernel the link is down */
  1787. phy_stop(phydev);
  1788. /* Mask all interrupts */
  1789. out_be32(ugeth->uccf->p_uccm, 0x00000000);
  1790. /* Clear all interrupts */
  1791. out_be32(ugeth->uccf->p_ucce, 0xffffffff);
  1792. /* Disable Rx and Tx */
  1793. clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
  1794. phy_disconnect(ugeth->phydev);
  1795. ugeth->phydev = NULL;
  1796. ucc_geth_memclean(ugeth);
  1797. }
  1798. static int ucc_struct_init(struct ucc_geth_private *ugeth)
  1799. {
  1800. struct ucc_geth_info *ug_info;
  1801. struct ucc_fast_info *uf_info;
  1802. int i;
  1803. ug_info = ugeth->ug_info;
  1804. uf_info = &ug_info->uf_info;
  1805. if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
  1806. (uf_info->bd_mem_part == MEM_PART_MURAM))) {
  1807. if (netif_msg_probe(ugeth))
  1808. ugeth_err("%s: Bad memory partition value.",
  1809. __func__);
  1810. return -EINVAL;
  1811. }
  1812. /* Rx BD lengths */
  1813. for (i = 0; i < ug_info->numQueuesRx; i++) {
  1814. if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
  1815. (ug_info->bdRingLenRx[i] %
  1816. UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
  1817. if (netif_msg_probe(ugeth))
  1818. ugeth_err
  1819. ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
  1820. __func__);
  1821. return -EINVAL;
  1822. }
  1823. }
  1824. /* Tx BD lengths */
  1825. for (i = 0; i < ug_info->numQueuesTx; i++) {
  1826. if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
  1827. if (netif_msg_probe(ugeth))
  1828. ugeth_err
  1829. ("%s: Tx BD ring length must be no smaller than 2.",
  1830. __func__);
  1831. return -EINVAL;
  1832. }
  1833. }
  1834. /* mrblr */
  1835. if ((uf_info->max_rx_buf_length == 0) ||
  1836. (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
  1837. if (netif_msg_probe(ugeth))
  1838. ugeth_err
  1839. ("%s: max_rx_buf_length must be non-zero multiple of 128.",
  1840. __func__);
  1841. return -EINVAL;
  1842. }
  1843. /* num Tx queues */
  1844. if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
  1845. if (netif_msg_probe(ugeth))
  1846. ugeth_err("%s: number of tx queues too large.", __func__);
  1847. return -EINVAL;
  1848. }
  1849. /* num Rx queues */
  1850. if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
  1851. if (netif_msg_probe(ugeth))
  1852. ugeth_err("%s: number of rx queues too large.", __func__);
  1853. return -EINVAL;
  1854. }
  1855. /* l2qt */
  1856. for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
  1857. if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
  1858. if (netif_msg_probe(ugeth))
  1859. ugeth_err
  1860. ("%s: VLAN priority table entry must not be"
  1861. " larger than number of Rx queues.",
  1862. __func__);
  1863. return -EINVAL;
  1864. }
  1865. }
  1866. /* l3qt */
  1867. for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
  1868. if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
  1869. if (netif_msg_probe(ugeth))
  1870. ugeth_err
  1871. ("%s: IP priority table entry must not be"
  1872. " larger than number of Rx queues.",
  1873. __func__);
  1874. return -EINVAL;
  1875. }
  1876. }
  1877. if (ug_info->cam && !ug_info->ecamptr) {
  1878. if (netif_msg_probe(ugeth))
  1879. ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
  1880. __func__);
  1881. return -EINVAL;
  1882. }
  1883. if ((ug_info->numStationAddresses !=
  1884. UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
  1885. && ug_info->rxExtendedFiltering) {
  1886. if (netif_msg_probe(ugeth))
  1887. ugeth_err("%s: Number of station addresses greater than 1 "
  1888. "not allowed in extended parsing mode.",
  1889. __func__);
  1890. return -EINVAL;
  1891. }
  1892. /* Generate uccm_mask for receive */
  1893. uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
  1894. for (i = 0; i < ug_info->numQueuesRx; i++)
  1895. uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
  1896. for (i = 0; i < ug_info->numQueuesTx; i++)
  1897. uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
  1898. /* Initialize the general fast UCC block. */
  1899. if (ucc_fast_init(uf_info, &ugeth->uccf)) {
  1900. if (netif_msg_probe(ugeth))
  1901. ugeth_err("%s: Failed to init uccf.", __func__);
  1902. return -ENOMEM;
  1903. }
  1904. /* read the number of risc engines, update the riscTx and riscRx
  1905. * if there are 4 riscs in QE
  1906. */
  1907. if (qe_get_num_of_risc() == 4) {
  1908. ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
  1909. ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
  1910. }
  1911. ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
  1912. if (!ugeth->ug_regs) {
  1913. if (netif_msg_probe(ugeth))
  1914. ugeth_err("%s: Failed to ioremap regs.", __func__);
  1915. return -ENOMEM;
  1916. }
  1917. skb_queue_head_init(&ugeth->rx_recycle);
  1918. return 0;
  1919. }
  1920. static int ucc_geth_startup(struct ucc_geth_private *ugeth)
  1921. {
  1922. struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
  1923. struct ucc_geth_init_pram __iomem *p_init_enet_pram;
  1924. struct ucc_fast_private *uccf;
  1925. struct ucc_geth_info *ug_info;
  1926. struct ucc_fast_info *uf_info;
  1927. struct ucc_fast __iomem *uf_regs;
  1928. struct ucc_geth __iomem *ug_regs;
  1929. int ret_val = -EINVAL;
  1930. u32 remoder = UCC_GETH_REMODER_INIT;
  1931. u32 init_enet_pram_offset, cecr_subblock, command;
  1932. u32 ifstat, i, j, size, l2qt, l3qt, length;
  1933. u16 temoder = UCC_GETH_TEMODER_INIT;
  1934. u16 test;
  1935. u8 function_code = 0;
  1936. u8 __iomem *bd;
  1937. u8 __iomem *endOfRing;
  1938. u8 numThreadsRxNumerical, numThreadsTxNumerical;
  1939. ugeth_vdbg("%s: IN", __func__);
  1940. uccf = ugeth->uccf;
  1941. ug_info = ugeth->ug_info;
  1942. uf_info = &ug_info->uf_info;
  1943. uf_regs = uccf->uf_regs;
  1944. ug_regs = ugeth->ug_regs;
  1945. switch (ug_info->numThreadsRx) {
  1946. case UCC_GETH_NUM_OF_THREADS_1:
  1947. numThreadsRxNumerical = 1;
  1948. break;
  1949. case UCC_GETH_NUM_OF_THREADS_2:
  1950. numThreadsRxNumerical = 2;
  1951. break;
  1952. case UCC_GETH_NUM_OF_THREADS_4:
  1953. numThreadsRxNumerical = 4;
  1954. break;
  1955. case UCC_GETH_NUM_OF_THREADS_6:
  1956. numThreadsRxNumerical = 6;
  1957. break;
  1958. case UCC_GETH_NUM_OF_THREADS_8:
  1959. numThreadsRxNumerical = 8;
  1960. break;
  1961. default:
  1962. if (netif_msg_ifup(ugeth))
  1963. ugeth_err("%s: Bad number of Rx threads value.",
  1964. __func__);
  1965. return -EINVAL;
  1966. break;
  1967. }
  1968. switch (ug_info->numThreadsTx) {
  1969. case UCC_GETH_NUM_OF_THREADS_1:
  1970. numThreadsTxNumerical = 1;
  1971. break;
  1972. case UCC_GETH_NUM_OF_THREADS_2:
  1973. numThreadsTxNumerical = 2;
  1974. break;
  1975. case UCC_GETH_NUM_OF_THREADS_4:
  1976. numThreadsTxNumerical = 4;
  1977. break;
  1978. case UCC_GETH_NUM_OF_THREADS_6:
  1979. numThreadsTxNumerical = 6;
  1980. break;
  1981. case UCC_GETH_NUM_OF_THREADS_8:
  1982. numThreadsTxNumerical = 8;
  1983. break;
  1984. default:
  1985. if (netif_msg_ifup(ugeth))
  1986. ugeth_err("%s: Bad number of Tx threads value.",
  1987. __func__);
  1988. return -EINVAL;
  1989. break;
  1990. }
  1991. /* Calculate rx_extended_features */
  1992. ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
  1993. ug_info->ipAddressAlignment ||
  1994. (ug_info->numStationAddresses !=
  1995. UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
  1996. ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
  1997. (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
  1998. || (ug_info->vlanOperationNonTagged !=
  1999. UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
  2000. init_default_reg_vals(&uf_regs->upsmr,
  2001. &ug_regs->maccfg1, &ug_regs->maccfg2);
  2002. /* Set UPSMR */
  2003. /* For more details see the hardware spec. */
  2004. init_rx_parameters(ug_info->bro,
  2005. ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
  2006. /* We're going to ignore other registers for now, */
  2007. /* except as needed to get up and running */
  2008. /* Set MACCFG1 */
  2009. /* For more details see the hardware spec. */
  2010. init_flow_control_params(ug_info->aufc,
  2011. ug_info->receiveFlowControl,
  2012. ug_info->transmitFlowControl,
  2013. ug_info->pausePeriod,
  2014. ug_info->extensionField,
  2015. &uf_regs->upsmr,
  2016. &ug_regs->uempr, &ug_regs->maccfg1);
  2017. setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
  2018. /* Set IPGIFG */
  2019. /* For more details see the hardware spec. */
  2020. ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
  2021. ug_info->nonBackToBackIfgPart2,
  2022. ug_info->
  2023. miminumInterFrameGapEnforcement,
  2024. ug_info->backToBackInterFrameGap,
  2025. &ug_regs->ipgifg);
  2026. if (ret_val != 0) {
  2027. if (netif_msg_ifup(ugeth))
  2028. ugeth_err("%s: IPGIFG initialization parameter too large.",
  2029. __func__);
  2030. return ret_val;
  2031. }
  2032. /* Set HAFDUP */
  2033. /* For more details see the hardware spec. */
  2034. ret_val = init_half_duplex_params(ug_info->altBeb,
  2035. ug_info->backPressureNoBackoff,
  2036. ug_info->noBackoff,
  2037. ug_info->excessDefer,
  2038. ug_info->altBebTruncation,
  2039. ug_info->maxRetransmission,
  2040. ug_info->collisionWindow,
  2041. &ug_regs->hafdup);
  2042. if (ret_val != 0) {
  2043. if (netif_msg_ifup(ugeth))
  2044. ugeth_err("%s: Half Duplex initialization parameter too large.",
  2045. __func__);
  2046. return ret_val;
  2047. }
  2048. /* Set IFSTAT */
  2049. /* For more details see the hardware spec. */
  2050. /* Read only - resets upon read */
  2051. ifstat = in_be32(&ug_regs->ifstat);
  2052. /* Clear UEMPR */
  2053. /* For more details see the hardware spec. */
  2054. out_be32(&ug_regs->uempr, 0);
  2055. /* Set UESCR */
  2056. /* For more details see the hardware spec. */
  2057. init_hw_statistics_gathering_mode((ug_info->statisticsMode &
  2058. UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
  2059. 0, &uf_regs->upsmr, &ug_regs->uescr);
  2060. /* Allocate Tx bds */
  2061. for (j = 0; j < ug_info->numQueuesTx; j++) {
  2062. /* Allocate in multiple of
  2063. UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
  2064. according to spec */
  2065. length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
  2066. / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
  2067. * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
  2068. if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
  2069. UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
  2070. length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
  2071. if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
  2072. u32 align = 4;
  2073. if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
  2074. align = UCC_GETH_TX_BD_RING_ALIGNMENT;
  2075. ugeth->tx_bd_ring_offset[j] =
  2076. (u32) kmalloc((u32) (length + align), GFP_KERNEL);
  2077. if (ugeth->tx_bd_ring_offset[j] != 0)
  2078. ugeth->p_tx_bd_ring[j] =
  2079. (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
  2080. align) & ~(align - 1));
  2081. } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
  2082. ugeth->tx_bd_ring_offset[j] =
  2083. qe_muram_alloc(length,
  2084. UCC_GETH_TX_BD_RING_ALIGNMENT);
  2085. if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
  2086. ugeth->p_tx_bd_ring[j] =
  2087. (u8 __iomem *) qe_muram_addr(ugeth->
  2088. tx_bd_ring_offset[j]);
  2089. }
  2090. if (!ugeth->p_tx_bd_ring[j]) {
  2091. if (netif_msg_ifup(ugeth))
  2092. ugeth_err
  2093. ("%s: Can not allocate memory for Tx bd rings.",
  2094. __func__);
  2095. return -ENOMEM;
  2096. }
  2097. /* Zero unused end of bd ring, according to spec */
  2098. memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
  2099. ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
  2100. length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
  2101. }
  2102. /* Allocate Rx bds */
  2103. for (j = 0; j < ug_info->numQueuesRx; j++) {
  2104. length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
  2105. if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
  2106. u32 align = 4;
  2107. if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
  2108. align = UCC_GETH_RX_BD_RING_ALIGNMENT;
  2109. ugeth->rx_bd_ring_offset[j] =
  2110. (u32) kmalloc((u32) (length + align), GFP_KERNEL);
  2111. if (ugeth->rx_bd_ring_offset[j] != 0)
  2112. ugeth->p_rx_bd_ring[j] =
  2113. (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
  2114. align) & ~(align - 1));
  2115. } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
  2116. ugeth->rx_bd_ring_offset[j] =
  2117. qe_muram_alloc(length,
  2118. UCC_GETH_RX_BD_RING_ALIGNMENT);
  2119. if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
  2120. ugeth->p_rx_bd_ring[j] =
  2121. (u8 __iomem *) qe_muram_addr(ugeth->
  2122. rx_bd_ring_offset[j]);
  2123. }
  2124. if (!ugeth->p_rx_bd_ring[j]) {
  2125. if (netif_msg_ifup(ugeth))
  2126. ugeth_err
  2127. ("%s: Can not allocate memory for Rx bd rings.",
  2128. __func__);
  2129. return -ENOMEM;
  2130. }
  2131. }
  2132. /* Init Tx bds */
  2133. for (j = 0; j < ug_info->numQueuesTx; j++) {
  2134. /* Setup the skbuff rings */
  2135. ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
  2136. ugeth->ug_info->bdRingLenTx[j],
  2137. GFP_KERNEL);
  2138. if (ugeth->tx_skbuff[j] == NULL) {
  2139. if (netif_msg_ifup(ugeth))
  2140. ugeth_err("%s: Could not allocate tx_skbuff",
  2141. __func__);
  2142. return -ENOMEM;
  2143. }
  2144. for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
  2145. ugeth->tx_skbuff[j][i] = NULL;
  2146. ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
  2147. bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
  2148. for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
  2149. /* clear bd buffer */
  2150. out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
  2151. /* set bd status and length */
  2152. out_be32((u32 __iomem *)bd, 0);
  2153. bd += sizeof(struct qe_bd);
  2154. }
  2155. bd -= sizeof(struct qe_bd);
  2156. /* set bd status and length */
  2157. out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
  2158. }
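	/*
	 * Illustrative note (documentation only): after the loop above each
	 * Tx ring is a linear array of struct qe_bd entries with the buffer
	 * pointer and status word cleared, and only the final descriptor
	 * carries the T_W (wrap) bit so the QE cycles back to the ring base
	 * after the last BD.  The Rx rings built below follow the same
	 * wrap-bit pattern.
	 */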
  2159. /* Init Rx bds */
  2160. for (j = 0; j < ug_info->numQueuesRx; j++) {
  2161. /* Setup the skbuff rings */
  2162. ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
  2163. ugeth->ug_info->bdRingLenRx[j],
  2164. GFP_KERNEL);
  2165. if (ugeth->rx_skbuff[j] == NULL) {
  2166. if (netif_msg_ifup(ugeth))
  2167. ugeth_err("%s: Could not allocate rx_skbuff",
  2168. __func__);
  2169. return -ENOMEM;
  2170. }
  2171. for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
  2172. ugeth->rx_skbuff[j][i] = NULL;
  2173. ugeth->skb_currx[j] = 0;
  2174. bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
  2175. for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
  2176. /* set bd status and length */
  2177. out_be32((u32 __iomem *)bd, R_I);
  2178. /* clear bd buffer */
  2179. out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
  2180. bd += sizeof(struct qe_bd);
  2181. }
  2182. bd -= sizeof(struct qe_bd);
  2183. /* set bd status and length */
  2184. out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
  2185. }
  2186. /*
  2187. * Global PRAM
  2188. */
  2189. /* Tx global PRAM */
  2190. /* Allocate global tx parameter RAM page */
  2191. ugeth->tx_glbl_pram_offset =
  2192. qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
  2193. UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
  2194. if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
  2195. if (netif_msg_ifup(ugeth))
  2196. ugeth_err
  2197. ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
  2198. __func__);
  2199. return -ENOMEM;
  2200. }
  2201. ugeth->p_tx_glbl_pram =
  2202. (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
  2203. tx_glbl_pram_offset);
  2204. /* Zero out p_tx_glbl_pram */
  2205. memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
  2206. /* Fill global PRAM */
  2207. /* TQPTR */
  2208. /* Size varies with number of Tx threads */
  2209. ugeth->thread_dat_tx_offset =
  2210. qe_muram_alloc(numThreadsTxNumerical *
  2211. sizeof(struct ucc_geth_thread_data_tx) +
  2212. 32 * (numThreadsTxNumerical == 1),
  2213. UCC_GETH_THREAD_DATA_ALIGNMENT);
  2214. if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
  2215. if (netif_msg_ifup(ugeth))
  2216. ugeth_err
  2217. ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
  2218. __func__);
  2219. return -ENOMEM;
  2220. }
  2221. ugeth->p_thread_data_tx =
  2222. (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
  2223. thread_dat_tx_offset);
  2224. out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
  2225. /* vtagtable */
  2226. for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
  2227. out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
  2228. ug_info->vtagtable[i]);
  2229. /* iphoffset */
  2230. for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
  2231. out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
  2232. ug_info->iphoffset[i]);
  2233. /* SQPTR */
  2234. /* Size varies with number of Tx queues */
  2235. ugeth->send_q_mem_reg_offset =
  2236. qe_muram_alloc(ug_info->numQueuesTx *
  2237. sizeof(struct ucc_geth_send_queue_qd),
  2238. UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
  2239. if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
  2240. if (netif_msg_ifup(ugeth))
  2241. ugeth_err
  2242. ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
  2243. __func__);
  2244. return -ENOMEM;
  2245. }
  2246. ugeth->p_send_q_mem_reg =
  2247. (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
  2248. send_q_mem_reg_offset);
  2249. out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
  2250. /* Setup the table */
  2251. /* Assume BD rings are already established */
  2252. for (i = 0; i < ug_info->numQueuesTx; i++) {
  2253. endOfRing =
  2254. ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
  2255. 1) * sizeof(struct qe_bd);
  2256. if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
  2257. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
  2258. (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
  2259. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
  2260. last_bd_completed_address,
  2261. (u32) virt_to_phys(endOfRing));
  2262. } else if (ugeth->ug_info->uf_info.bd_mem_part ==
  2263. MEM_PART_MURAM) {
  2264. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
  2265. (u32) immrbar_virt_to_phys(ugeth->
  2266. p_tx_bd_ring[i]));
  2267. out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
  2268. last_bd_completed_address,
  2269. (u32) immrbar_virt_to_phys(endOfRing));
  2270. }
  2271. }
  2272. /* schedulerbasepointer */
  2273. if (ug_info->numQueuesTx > 1) {
  2274. /* scheduler exists only if more than 1 tx queue */
  2275. ugeth->scheduler_offset =
  2276. qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
  2277. UCC_GETH_SCHEDULER_ALIGNMENT);
  2278. if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
  2279. if (netif_msg_ifup(ugeth))
  2280. ugeth_err
  2281. ("%s: Can not allocate DPRAM memory for p_scheduler.",
  2282. __func__);
  2283. return -ENOMEM;
  2284. }
  2285. ugeth->p_scheduler =
  2286. (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
  2287. scheduler_offset);
  2288. out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
  2289. ugeth->scheduler_offset);
  2290. /* Zero out p_scheduler */
  2291. memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
  2292. /* Set values in scheduler */
  2293. out_be32(&ugeth->p_scheduler->mblinterval,
  2294. ug_info->mblinterval);
  2295. out_be16(&ugeth->p_scheduler->nortsrbytetime,
  2296. ug_info->nortsrbytetime);
  2297. out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
  2298. out_8(&ugeth->p_scheduler->strictpriorityq,
  2299. ug_info->strictpriorityq);
  2300. out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
  2301. out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
  2302. for (i = 0; i < NUM_TX_QUEUES; i++)
  2303. out_8(&ugeth->p_scheduler->weightfactor[i],
  2304. ug_info->weightfactor[i]);
  2305. /* Set pointers to cpucount registers in scheduler */
  2306. ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
  2307. ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
  2308. ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
  2309. ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
  2310. ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
  2311. ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
  2312. ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
  2313. ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
  2314. }
  2315. /* schedulerbasepointer */
  2316. /* TxRMON_PTR (statistics) */
  2317. if (ug_info->
  2318. statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
  2319. ugeth->tx_fw_statistics_pram_offset =
  2320. qe_muram_alloc(sizeof
  2321. (struct ucc_geth_tx_firmware_statistics_pram),
  2322. UCC_GETH_TX_STATISTICS_ALIGNMENT);
  2323. if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
  2324. if (netif_msg_ifup(ugeth))
  2325. ugeth_err
  2326. ("%s: Can not allocate DPRAM memory for"
  2327. " p_tx_fw_statistics_pram.",
  2328. __func__);
  2329. return -ENOMEM;
  2330. }
  2331. ugeth->p_tx_fw_statistics_pram =
  2332. (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
  2333. qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
  2334. /* Zero out p_tx_fw_statistics_pram */
  2335. memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
  2336. 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
  2337. }
  2338. /* temoder */
  2339. /* Already has speed set */
  2340. if (ug_info->numQueuesTx > 1)
  2341. temoder |= TEMODER_SCHEDULER_ENABLE;
  2342. if (ug_info->ipCheckSumGenerate)
  2343. temoder |= TEMODER_IP_CHECKSUM_GENERATE;
  2344. temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
  2345. out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
  2346. test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
  2347. /* Function code register value to be used later */
  2348. function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
  2349. /* Required for QE */
  2350. /* function code register */
  2351. out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
  2352. /* Rx global PRAM */
  2353. /* Allocate global rx parameter RAM page */
  2354. ugeth->rx_glbl_pram_offset =
  2355. qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
  2356. UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
  2357. if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
  2358. if (netif_msg_ifup(ugeth))
  2359. ugeth_err
  2360. ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
  2361. __func__);
  2362. return -ENOMEM;
  2363. }
  2364. ugeth->p_rx_glbl_pram =
  2365. (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
  2366. rx_glbl_pram_offset);
  2367. /* Zero out p_rx_glbl_pram */
  2368. memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
  2369. /* Fill global PRAM */
  2370. /* RQPTR */
  2371. /* Size varies with number of Rx threads */
  2372. ugeth->thread_dat_rx_offset =
  2373. qe_muram_alloc(numThreadsRxNumerical *
  2374. sizeof(struct ucc_geth_thread_data_rx),
  2375. UCC_GETH_THREAD_DATA_ALIGNMENT);
  2376. if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
  2377. if (netif_msg_ifup(ugeth))
  2378. ugeth_err
  2379. ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
  2380. __func__);
  2381. return -ENOMEM;
  2382. }
  2383. ugeth->p_thread_data_rx =
  2384. (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
  2385. thread_dat_rx_offset);
  2386. out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
  2387. /* typeorlen */
  2388. out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
  2389. /* rxrmonbaseptr (statistics) */
  2390. if (ug_info->
  2391. statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
  2392. ugeth->rx_fw_statistics_pram_offset =
  2393. qe_muram_alloc(sizeof
  2394. (struct ucc_geth_rx_firmware_statistics_pram),
  2395. UCC_GETH_RX_STATISTICS_ALIGNMENT);
  2396. if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
  2397. if (netif_msg_ifup(ugeth))
  2398. ugeth_err
  2399. ("%s: Can not allocate DPRAM memory for"
  2400. " p_rx_fw_statistics_pram.", __func__);
  2401. return -ENOMEM;
  2402. }
  2403. ugeth->p_rx_fw_statistics_pram =
  2404. (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
  2405. qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
  2406. /* Zero out p_rx_fw_statistics_pram */
  2407. memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
  2408. sizeof(struct ucc_geth_rx_firmware_statistics_pram));
  2409. }
  2410. /* intCoalescingPtr */
  2411. /* Size varies with number of Rx queues */
  2412. ugeth->rx_irq_coalescing_tbl_offset =
  2413. qe_muram_alloc(ug_info->numQueuesRx *
  2414. sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
  2415. + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
  2416. if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
  2417. if (netif_msg_ifup(ugeth))
  2418. ugeth_err
  2419. ("%s: Can not allocate DPRAM memory for"
  2420. " p_rx_irq_coalescing_tbl.", __func__);
  2421. return -ENOMEM;
  2422. }
  2423. ugeth->p_rx_irq_coalescing_tbl =
  2424. (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
  2425. qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
  2426. out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
  2427. ugeth->rx_irq_coalescing_tbl_offset);
  2428. /* Fill interrupt coalescing table */
  2429. for (i = 0; i < ug_info->numQueuesRx; i++) {
  2430. out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
  2431. interruptcoalescingmaxvalue,
  2432. ug_info->interruptcoalescingmaxvalue[i]);
  2433. out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
  2434. interruptcoalescingcounter,
  2435. ug_info->interruptcoalescingmaxvalue[i]);
  2436. }
  2437. /* MRBLR */
  2438. init_max_rx_buff_len(uf_info->max_rx_buf_length,
  2439. &ugeth->p_rx_glbl_pram->mrblr);
  2440. /* MFLR */
  2441. out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
  2442. /* MINFLR */
  2443. init_min_frame_len(ug_info->minFrameLength,
  2444. &ugeth->p_rx_glbl_pram->minflr,
  2445. &ugeth->p_rx_glbl_pram->mrblr);
  2446. /* MAXD1 */
  2447. out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
  2448. /* MAXD2 */
  2449. out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
  2450. /* l2qt */
  2451. l2qt = 0;
  2452. for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
  2453. l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
  2454. out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
  2455. /* l3qt */
  2456. for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
  2457. l3qt = 0;
  2458. for (i = 0; i < 8; i++)
  2459. l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
  2460. out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
  2461. }

	/* vlantype */
	out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);

	/* vlantci */
	out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);

	/* ecamptr */
	out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);

	/* RBDQPTR */
	/* Size varies with number of Rx queues */
	ugeth->rx_bd_qs_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   (sizeof(struct ucc_geth_rx_bd_queues_entry) +
			    sizeof(struct ucc_geth_rx_prefetched_bds)),
			   UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
			     __func__);
		return -ENOMEM;
	}

	ugeth->p_rx_bd_qs_tbl =
	    (struct ucc_geth_rx_bd_queues_entry __iomem *)
	    qe_muram_addr(ugeth->rx_bd_qs_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);

	/* Zero out p_rx_bd_qs_tbl */
	memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
		  0,
		  ug_info->numQueuesRx *
		  (sizeof(struct ucc_geth_rx_bd_queues_entry) +
		   sizeof(struct ucc_geth_rx_prefetched_bds)));

	/* Setup the table */
	/* Assume BD rings are already established */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
			   MEM_PART_MURAM) {
			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				 (u32) immrbar_virt_to_phys(
					 ugeth->p_rx_bd_ring[i]));
		}
		/* rest of fields handled by QE */
	}
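	/*
	 * The QE reads the BD rings directly, so externalbdbaseptr must
	 * hold an address the QE can use: virt_to_phys() for rings placed
	 * in system memory, immrbar_virt_to_phys() for rings in MURAM.
	 */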

	/* remoder */
	/* Already has speed set */
	if (ugeth->rx_extended_features)
		remoder |= REMODER_RX_EXTENDED_FEATURES;
	if (ug_info->rxExtendedFiltering)
		remoder |= REMODER_RX_EXTENDED_FILTERING;
	if (ug_info->dynamicMaxFrameLength)
		remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
	if (ug_info->dynamicMinFrameLength)
		remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
	remoder |= ug_info->vlanOperationTagged <<
	    REMODER_VLAN_OPERATION_TAGGED_SHIFT;
	remoder |= ug_info->vlanOperationNonTagged <<
	    REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
	remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
	remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
	if (ug_info->ipCheckSumCheck)
		remoder |= REMODER_IP_CHECKSUM_CHECK;
	if (ug_info->ipAddressAlignment)
		remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
	out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
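	/*
	 * The queue-count field in REMODER is encoded as (numQueuesRx - 1);
	 * the remaining bits set above simply enable the optional Rx
	 * features selected in ug_info.
	 */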

	/*
	 * Note that this function must be called ONLY AFTER
	 * p_tx_fw_statistics_pram and p_rx_fw_statistics_pram are
	 * allocated!
	 */
	init_firmware_statistics_gathering_mode((ug_info->statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
		(ug_info->statisticsMode &
		 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
		&ugeth->p_tx_glbl_pram->txrmonbaseptr,
		ugeth->tx_fw_statistics_pram_offset,
		&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
		ugeth->rx_fw_statistics_pram_offset,
		&ugeth->p_tx_glbl_pram->temoder,
		&ugeth->p_rx_glbl_pram->remoder);

	/* function code register */
	out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);

	/* initialize extended filtering */
	if (ug_info->rxExtendedFiltering) {
		if (!ug_info->extendedFilteringChainPointer) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Null Extended Filtering Chain Pointer.",
					  __func__);
			return -EINVAL;
		}

		/* Allocate memory for extended filtering Mode Global
		   Parameters */
		ugeth->exf_glbl_param_offset =
		    qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
			UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate DPRAM memory for"
				     " p_exf_glbl_param.", __func__);
			return -ENOMEM;
		}

		ugeth->p_exf_glbl_param =
		    (struct ucc_geth_exf_global_pram __iomem *)
		    qe_muram_addr(ugeth->exf_glbl_param_offset);
		out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
			 ugeth->exf_glbl_param_offset);
		out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
			 (u32) ug_info->extendedFilteringChainPointer);
	} else {	/* initialize 82xx style address filtering */

		/* Init individual address recognition registers to disabled */
		for (j = 0; j < NUM_OF_PADDRS; j++)
			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);

		p_82xx_addr_filt =
		    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
		    ugeth->p_rx_glbl_pram->addressfiltering;

		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
			ENET_ADDR_TYPE_GROUP);
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
			ENET_ADDR_TYPE_INDIVIDUAL);
	}
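	/*
	 * With 82xx-style filtering the individual-address registers and
	 * both hash tables start out cleared, so no group or alternate
	 * individual addresses match until ucc_geth_set_multi() programs
	 * the hashes.
	 */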

	/*
	 * Initialize UCC at QE level
	 */

	command = QE_INIT_TX_RX;

	/* Allocate shadow InitEnet command parameter structure.
	 * This is needed because after the InitEnet command is executed,
	 * the structure in DPRAM is released, because DPRAM is a premium
	 * resource.
	 * This shadow structure keeps a copy of what was done so that the
	 * allocated resources can be released when the channel is freed.
	 */
	if (!(ugeth->p_init_enet_param_shadow =
	      kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate memory for"
			     " p_UccInitEnetParamShadows.", __func__);
		return -ENOMEM;
	}
	/* Zero out *p_init_enet_param_shadow */
	memset((char *)ugeth->p_init_enet_param_shadow,
	       0, sizeof(struct ucc_geth_init_pram));

	/* Fill shadow InitEnet command parameter structure */

	ugeth->p_init_enet_param_shadow->resinit1 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT1;
	ugeth->p_init_enet_param_shadow->resinit2 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT2;
	ugeth->p_init_enet_param_shadow->resinit3 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT3;
	ugeth->p_init_enet_param_shadow->resinit4 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT4;
	ugeth->p_init_enet_param_shadow->resinit5 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT5;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ugeth->rx_glbl_pram_offset | ug_info->riscRx;

	if ((ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
	    (ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
	    (ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Invalid largest External Lookup Key Size.",
				  __func__);
		return -EINVAL;
	}
	ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
	    ug_info->largestexternallookupkeysize;

	size = sizeof(struct ucc_geth_thread_rx_pram);
	if (ug_info->rxExtendedFiltering) {
		size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
	}
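	/*
	 * Each Rx thread's parameter RAM grows when extended filtering is
	 * enabled, and grows further according to the largest external
	 * lookup key (8 or 16 bytes) the filter tables may use.
	 */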
	if ((ret_val = fill_init_enet_entries(ugeth,
		&(ugeth->p_init_enet_param_shadow->rxthread[0]),
		(u8) (numThreadsRxNumerical + 1),
		/* Rx needs one extra for terminator */
		size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
		ug_info->riscRx, 1)) != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
				  __func__);
		return ret_val;
	}

	ugeth->p_init_enet_param_shadow->txglobal =
	    ugeth->tx_glbl_pram_offset | ug_info->riscTx;
	if ((ret_val = fill_init_enet_entries(ugeth,
		&(ugeth->p_init_enet_param_shadow->txthread[0]),
		numThreadsTxNumerical,
		sizeof(struct ucc_geth_thread_tx_pram),
		UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
		ug_info->riscTx, 0)) != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
				  __func__);
		return ret_val;
	}

	/* Load Rx bds with buffers */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Can not fill Rx bds with buffers.",
					  __func__);
			return ret_val;
		}
	}

	/* Allocate InitEnet command parameter structure */
	init_enet_pram_offset =
	    qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
	if (IS_ERR_VALUE(init_enet_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
			     __func__);
		return -ENOMEM;
	}
	p_init_enet_pram =
	    (struct ucc_geth_init_pram __iomem *)
	    qe_muram_addr(init_enet_pram_offset);

	/* Copy shadow InitEnet command parameter structure into PRAM */
	out_8(&p_init_enet_pram->resinit1,
	      ugeth->p_init_enet_param_shadow->resinit1);
	out_8(&p_init_enet_pram->resinit2,
	      ugeth->p_init_enet_param_shadow->resinit2);
	out_8(&p_init_enet_pram->resinit3,
	      ugeth->p_init_enet_param_shadow->resinit3);
	out_8(&p_init_enet_pram->resinit4,
	      ugeth->p_init_enet_param_shadow->resinit4);
	out_be16(&p_init_enet_pram->resinit5,
		 ugeth->p_init_enet_param_shadow->resinit5);
	out_8(&p_init_enet_pram->largestexternallookupkeysize,
	      ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
	out_be32(&p_init_enet_pram->rgftgfrxglobal,
		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
		out_be32(&p_init_enet_pram->rxthread[i],
			 ugeth->p_init_enet_param_shadow->rxthread[i]);
	out_be32(&p_init_enet_pram->txglobal,
		 ugeth->p_init_enet_param_shadow->txglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
		out_be32(&p_init_enet_pram->txthread[i],
			 ugeth->p_init_enet_param_shadow->txthread[i]);

	/* Issue QE command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);

	/* Free InitEnet command parameter */
	qe_muram_free(init_enet_pram_offset);

	return 0;
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	struct ucc_fast_private *uccf;
#endif
	u8 __iomem *bd;			/* BD pointer */
	u32 bd_status;
	u8 txQ = 0;
	unsigned long flags;

	ugeth_vdbg("%s: IN", __func__);

	spin_lock_irqsave(&ugeth->lock, flags);

	dev->stats.tx_bytes += skb->len;

	/* Start from the next BD that should be filled */
	bd = ugeth->txBd[txQ];
	bd_status = in_be32((u32 __iomem *)bd);
	/* Save the skb pointer so we can free it later */
	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	ugeth->skb_curtx[txQ] =
	    (ugeth->skb_curtx[txQ] +
	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
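	/*
	 * TX_RING_MOD_MASK() implements the ring-index wrap with a simple
	 * AND, which assumes the configured Tx BD ring length is a power
	 * of two.
	 */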

	/* set up the buffer descriptor */
	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev, skb->data,
				skb->len, DMA_TO_DEVICE));

	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */

	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;

	/* set bd status and length */
	out_be32((u32 __iomem *)bd, bd_status);

	dev->trans_start = jiffies;

	/* Move to next BD in the ring */
	if (!(bd_status & T_W))
		bd += sizeof(struct qe_bd);
	else
		bd = ugeth->p_tx_bd_ring[txQ];
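	/*
	 * T_W is the wrap bit set on the last BD of the ring; when it is
	 * seen, both the QE and this driver continue from the ring base.
	 */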

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (bd == ugeth->confBd[txQ]) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	ugeth->txBd[txQ] = bd;

	if (ugeth->p_scheduler) {
		ugeth->cpucount[txQ]++;
		/* Indicate to QE that there are more Tx bds ready for
		   transmission */
		/* This is done by writing a running counter of the bd
		   count to the scheduler PRAM. */
		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
	}

#ifdef CONFIG_UGETH_TX_ON_DEMAND
	uccf = ugeth->uccf;
	out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
	spin_unlock_irqrestore(&ugeth->lock, flags);

	return NETDEV_TX_OK;
}

static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
{
	struct sk_buff *skb;
	u8 __iomem *bd;
	u16 length, howmany = 0;
	u32 bd_status;
	u8 *bdBuffer;
	struct net_device *dev;

	ugeth_vdbg("%s: IN", __func__);

	dev = ugeth->ndev;

	/* collect received buffers */
	bd = ugeth->rxBd[rxQ];

	bd_status = in_be32((u32 __iomem *)bd);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
		bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
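		/*
		 * The length reported in the BD includes the 4-byte frame
		 * check sequence, which is stripped here before the skb is
		 * passed up the stack.
		 */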
		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];

		/* determine whether buffer is first, last, first and last
		   (single buffer frame) or middle (not first and not last) */
		if (!skb ||
		    (!(bd_status & (R_F | R_L))) ||
		    (bd_status & R_ERRORS_FATAL)) {
			if (netif_msg_rx_err(ugeth))
				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
					  __func__, __LINE__, (u32) skb);
			if (skb) {
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&ugeth->rx_recycle, skb);
			}
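			/*
			 * The dropped skb is reset and parked on rx_recycle
			 * so it can be reused for a later Rx buffer instead
			 * of being freed and reallocated.
			 */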
			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
			dev->stats.rx_dropped++;
		} else {
			dev->stats.rx_packets++;
			howmany++;

			/* Prep the skb for the packet */
			skb_put(skb, length);

			/* Tell the skb what kind of packet this is */
			skb->protocol = eth_type_trans(skb, ugeth->ndev);

			dev->stats.rx_bytes += length;
			/* Send the packet up the stack */
			netif_receive_skb(skb);
		}

		skb = get_new_skb(ugeth, bd);
		if (!skb) {
			if (netif_msg_rx_err(ugeth))
				ugeth_warn("%s: No Rx Data Buffer", __func__);
			dev->stats.rx_dropped++;
			break;
		}

		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;

		/* update to point at the next skb */
		ugeth->skb_currx[rxQ] =
		    (ugeth->skb_currx[rxQ] +
		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);

		if (bd_status & R_W)
			bd = ugeth->p_rx_bd_ring[rxQ];
		else
			bd += sizeof(struct qe_bd);

		bd_status = in_be32((u32 __iomem *)bd);
	}

	ugeth->rxBd[rxQ] = bd;
	return howmany;
}

static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
	/* Start from the next BD that should be filled */
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	u8 __iomem *bd;			/* BD pointer */
	u32 bd_status;

	bd = ugeth->confBd[txQ];
	bd_status = in_be32((u32 __iomem *)bd);

	/* Normal processing. */
	while ((bd_status & T_R) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
			break;

		dev->stats.tx_packets++;

		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];

		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
		    skb_recycle_check(skb,
				      ugeth->ug_info->uf_info.max_rx_buf_length +
				      UCC_GETH_RX_DATA_BUF_ALIGNMENT))
			__skb_queue_head(&ugeth->rx_recycle, skb);
		else
			dev_kfree_skb(skb);
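		/*
		 * Completed Tx skbs that are large enough to serve as Rx
		 * buffers are kept on rx_recycle (bounded by the Rx ring
		 * length) rather than freed outright.
		 */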

		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
		ugeth->skb_dirtytx[txQ] =
		    (ugeth->skb_dirtytx[txQ] +
		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W))
			bd += sizeof(struct qe_bd);
		else
			bd = ugeth->p_tx_bd_ring[txQ];
		bd_status = in_be32((u32 __iomem *)bd);
	}
	ugeth->confBd[txQ] = bd;
	return 0;
}

static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
	struct ucc_geth_private *ugeth =
	    container_of(napi, struct ucc_geth_private, napi);
	struct ucc_geth_info *ug_info;
	int howmany, i;

	ug_info = ugeth->ug_info;

	/* Tx event processing */
	spin_lock(&ugeth->lock);
	for (i = 0; i < ug_info->numQueuesTx; i++)
		ucc_geth_tx(ugeth->ndev, i);
	spin_unlock(&ugeth->lock);

	howmany = 0;
	for (i = 0; i < ug_info->numQueuesRx; i++)
		howmany += ucc_geth_rx(ugeth, i, budget - howmany);

	if (howmany < budget) {
		napi_complete(napi);
		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
	}
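	/*
	 * When the budget is not exhausted, the block above completes NAPI
	 * and unmasks the Rx/Tx events that ucc_geth_irq_handler() masked
	 * off when it scheduled this poll.
	 */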

	return howmany;
}

static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
	struct net_device *dev = info;
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	register u32 ucce;
	register u32 uccm;

	ugeth_vdbg("%s: IN", __func__);

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

	/* read and clear events */
	ucce = (u32) in_be32(uccf->p_ucce);
	uccm = (u32) in_be32(uccf->p_uccm);
	ucce &= uccm;
	out_be32(uccf->p_ucce, ucce);

	/* check for receive events that require processing */
	if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
		if (napi_schedule_prep(&ugeth->napi)) {
			uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
			out_be32(uccf->p_uccm, uccm);
			__napi_schedule(&ugeth->napi);
		}
	}

	/* Errors and other events */
	if (ucce & UCCE_OTHER) {
		if (ucce & UCC_GETH_UCCE_BSY)
			dev->stats.rx_errors++;
		if (ucce & UCC_GETH_UCCE_TXE)
			dev->stats.tx_errors++;
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ucc_netpoll(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int irq = ugeth->ug_info->uf_info.irq;

	disable_irq(irq);
	ucc_geth_irq_handler(irq, dev);
	enable_irq(irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/*
	 * If device is not running, we will set mac addr register
	 * when opening the device.
	 */
	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&ugeth->lock);
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);
	spin_unlock_irq(&ugeth->lock);

	return 0;
}

static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
{
	struct net_device *dev = ugeth->ndev;
	int err;

	err = ucc_struct_init(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure internal struct, "
				  "aborting.", dev->name);
		goto err;
	}

	err = ucc_geth_startup(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto err;
	}

	err = adjust_enet_interface(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto err;
	}

	/* Set MACSTNADDR1, MACSTNADDR2 */
	/* For more details see the hardware spec. */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot enable net device, aborting.",
				  dev->name);
		goto err;
	}

	return 0;

err:
	ucc_geth_stop(ugeth);
	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __func__);

	/* Test station address */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Multicast address used for station "
				  "address - is this what you wanted?",
				  __func__);
		return -EINVAL;
	}

	err = init_phy(dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot initialize PHY, aborting.",
				  dev->name);
		return err;
	}

	err = ucc_geth_init_mac(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot initialize MAC, aborting.",
				  dev->name);
		goto err;
	}

	err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
			  0, "UCC Geth", dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot get IRQ for net device, aborting.",
				  dev->name);
		goto err;
	}

	phy_start(ugeth->phydev);
	napi_enable(&ugeth->napi);
	netif_start_queue(dev);

	device_set_wakeup_capable(&dev->dev,
			qe_alive_during_sleep() || ugeth->phydev->irq);
	device_set_wakeup_enable(&dev->dev, ugeth->wol_en);

	return err;

err:
	ucc_geth_stop(ugeth);
	return err;
}

/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __func__);

	napi_disable(&ugeth->napi);

	ucc_geth_stop(ugeth);

	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);

	netif_stop_queue(dev);

	return 0;
}

/* Reopen device. This will reset the MAC and PHY. */
static void ucc_geth_timeout_work(struct work_struct *work)
{
	struct ucc_geth_private *ugeth;
	struct net_device *dev;

	ugeth = container_of(work, struct ucc_geth_private, timeout_work);
	dev = ugeth->ndev;

	ugeth_vdbg("%s: IN", __func__);

	dev->stats.tx_errors++;

	ugeth_dump_regs(ugeth);

	if (dev->flags & IFF_UP) {
		/*
		 * Must reset MAC *and* PHY. This is done by reopening
		 * the device.
		 */
		ucc_geth_close(dev);
		ucc_geth_open(dev);
	}

	netif_tx_schedule_all(dev);
}

/*
 * ucc_geth_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 */
static void ucc_geth_timeout(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	netif_carrier_off(dev);
	schedule_work(&ugeth->timeout_work);
}

#ifdef CONFIG_PM

static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct ucc_geth_private *ugeth = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	napi_disable(&ugeth->napi);

	/*
	 * Disable the controller, otherwise we'll wakeup on any network
	 * activity.
	 */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

	if (ugeth->wol_en & WAKE_MAGIC) {
		setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
		setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
		ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
	} else if (!(ugeth->wol_en & WAKE_PHY)) {
		phy_stop(ugeth->phydev);
	}
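	/*
	 * For WAKE_MAGIC the magic-packet-detect event (MPD) and MACCFG2's
	 * magic packet enable bit are armed and the UCC is re-enabled so
	 * the MAC can still see the wake frame; otherwise the PHY is
	 * stopped unless PHY-based wake-up was requested.
	 */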

	return 0;
}

static int ucc_geth_resume(struct of_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct ucc_geth_private *ugeth = netdev_priv(ndev);
	int err;

	if (!netif_running(ndev))
		return 0;

	if (qe_alive_during_sleep()) {
		if (ugeth->wol_en & WAKE_MAGIC) {
			ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
			clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
			clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
		}
		ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	} else {
		/*
		 * Full reinitialization is required if QE shuts down
		 * during sleep.
		 */
		ucc_geth_memclean(ugeth);

		err = ucc_geth_init_mac(ugeth);
		if (err) {
			ugeth_err("%s: Cannot initialize MAC, aborting.",
				  ndev->name);
			return err;
		}
	}

	ugeth->oldlink = 0;
	ugeth->oldspeed = 0;
	ugeth->oldduplex = -1;

	phy_stop(ugeth->phydev);
	phy_start(ugeth->phydev);

	napi_enable(&ugeth->napi);
	netif_start_queue(ndev);

	return 0;
}

#else
#define ucc_geth_suspend NULL
#define ucc_geth_resume NULL
#endif

static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
	if (strcasecmp(phy_connection_type, "mii") == 0)
		return PHY_INTERFACE_MODE_MII;
	if (strcasecmp(phy_connection_type, "gmii") == 0)
		return PHY_INTERFACE_MODE_GMII;
	if (strcasecmp(phy_connection_type, "tbi") == 0)
		return PHY_INTERFACE_MODE_TBI;
	if (strcasecmp(phy_connection_type, "rmii") == 0)
		return PHY_INTERFACE_MODE_RMII;
	if (strcasecmp(phy_connection_type, "rgmii") == 0)
		return PHY_INTERFACE_MODE_RGMII;
	if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
		return PHY_INTERFACE_MODE_RGMII_ID;
	if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
		return PHY_INTERFACE_MODE_RGMII_TXID;
	if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
		return PHY_INTERFACE_MODE_RGMII_RXID;
	if (strcasecmp(phy_connection_type, "rtbi") == 0)
		return PHY_INTERFACE_MODE_RTBI;
	if (strcasecmp(phy_connection_type, "sgmii") == 0)
		return PHY_INTERFACE_MODE_SGMII;

	return PHY_INTERFACE_MODE_MII;
}

static const struct net_device_ops ucc_geth_netdev_ops = {
	.ndo_open		= ucc_geth_open,
	.ndo_stop		= ucc_geth_close,
	.ndo_start_xmit		= ucc_geth_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ucc_geth_set_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_multicast_list	= ucc_geth_set_multi,
	.ndo_tx_timeout		= ucc_geth_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ucc_netpoll,
#endif
};

static int ucc_geth_probe(struct of_device *ofdev, const struct of_device_id *match)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	struct resource res;
	int err, ucc_num, max_speed = 0;
	const unsigned int *prop;
	const char *sprop;
	const void *mac_addr;
	phy_interface_t phy_interface;
	static const int enet_to_speed[] = {
		SPEED_10, SPEED_10, SPEED_10,
		SPEED_100, SPEED_100, SPEED_100,
		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
	};
	static const phy_interface_t enet_to_phy_interface[] = {
		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
		PHY_INTERFACE_MODE_SGMII,
	};

	ugeth_vdbg("%s: IN", __func__);

	prop = of_get_property(np, "cell-index", NULL);
	if (!prop) {
		prop = of_get_property(np, "device-id", NULL);
		if (!prop)
			return -ENODEV;
	}

	ucc_num = *prop - 1;
	if ((ucc_num < 0) || (ucc_num > 7))
		return -ENODEV;

	ug_info = &ugeth_info[ucc_num];
	if (ug_info == NULL) {
		if (netif_msg_probe(&debug))
			ugeth_err("%s: [%d] Missing additional data!",
				  __func__, ucc_num);
		return -ENODEV;
	}

	ug_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.rx_clock > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "rx-clock", NULL);
		if (!prop) {
			/* If both rx-clock-name and rx-clock are missing,
			   we want to tell people to use rx-clock-name. */
			printk(KERN_ERR
				"ucc_geth: missing rx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid rx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.rx_clock = *prop;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.tx_clock > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "tx-clock", NULL);
		if (!prop) {
			printk(KERN_ERR
				"ucc_geth: missing tx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid tx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.tx_clock = *prop;
	}

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	ug_info->uf_info.regs = res.start;
	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY node.  If it's not there, we don't support SGMII */
	ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	/* get the phy interface type, or default to MII */
	prop = of_get_property(np, "phy-connection-type", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(ug_info->phy_node, "interface", NULL);
		if (prop != NULL) {
			phy_interface = enet_to_phy_interface[*prop];
			max_speed = enet_to_speed[*prop];
		} else
			phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		phy_interface = to_phy_interface((const char *)prop);
	}

	/* get speed, or derive from PHY interface */
	if (max_speed == 0)
		switch (phy_interface) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
		case PHY_INTERFACE_MODE_SGMII:
			max_speed = SPEED_1000;
			break;
		default:
			max_speed = SPEED_100;
			break;
		}
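	/*
	 * Gigabit-capable interface modes get the larger FIFO and thread
	 * settings below; all other modes keep the 10/100 defaults copied
	 * from ugeth_primary_info at module init time.
	 */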
	if (max_speed == SPEED_1000) {
		/* configure muram FIFOs for gigabit operation */
		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;

		/* If QE's snum number is 46, which means we need to support
		 * 4 UECs at 1000Base-T simultaneously, we need to allocate
		 * more Threads to Rx.
		 */
		if (qe_get_num_of_snums() == 46)
			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
		else
			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
	}

	if (netif_msg_probe(&debug))
		printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
			ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
			ug_info->uf_info.irq);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));
	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	/* Create CQs for hash tables */
	INIT_LIST_HEAD(&ugeth->group_hash_q);
	INIT_LIST_HEAD(&ugeth->ind_hash_q);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the UCC register region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	uec_set_ethtool_ops(dev);
	dev->netdev_ops = &ucc_geth_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
	dev->mtu = 1500;

	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;

	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Cannot register net device, aborting.",
				  dev->name);
		free_netdev(dev);
		return err;
	}

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	ugeth->ug_info = ug_info;
	ugeth->dev = device;
	ugeth->ndev = dev;
	ugeth->node = np;

	return 0;
}

static int ucc_geth_remove(struct of_device *ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	unregister_netdev(dev);
	free_netdev(dev);
	ucc_geth_memclean(ugeth);
	dev_set_drvdata(device, NULL);

	return 0;
}

static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);

static struct of_platform_driver ucc_geth_driver = {
	.name		= DRV_NAME,
	.match_table	= ucc_geth_match,
	.probe		= ucc_geth_probe,
	.remove		= ucc_geth_remove,
	.suspend	= ucc_geth_suspend,
	.resume		= ucc_geth_resume,
};

static int __init ucc_geth_init(void)
{
	int i, ret;

	if (netif_msg_drv(&debug))
		printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	ret = of_register_platform_driver(&ucc_geth_driver);

	return ret;
}

static void __exit ucc_geth_exit(void)
{
	of_unregister_platform_driver(&ucc_geth_driver);
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");