/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or a set amount of time has passed). In NAPI, the
 * interrupt handler will signal that there is work to be done,
 * and exit. The poll method will then start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
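
/* Illustrative sketch only, not driver code: the descriptor-ring walk
 * described above, in minimal form. All names here (bd, BD_EMPTY,
 * BD_WRAP, process_frame, ring_base) are hypothetical, chosen to mirror
 * the rxbd8 layout used below.
 *
 *	struct bd { u32 status; u32 buf; };
 *	struct bd *bdp = cur_rx;
 *	while (!(bdp->status & BD_EMPTY)) {
 *		process_frame(bdp);
 *		bdp->status = BD_EMPTY | (bdp->status & BD_WRAP);
 *		bdp = (bdp->status & BD_WRAP) ? ring_base : bdp + 1;
 *	}
 *
 * Marking a descriptor EMPTY hands it back to the hardware; the WRAP bit
 * in the last descriptor sends the walk back to the base of the ring.
 */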
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT (1*HZ)
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	/* Make sure the buffer pointer reaches memory before the
	 * descriptor is marked empty, or the controller could DMA
	 * into a stale buffer.
	 */
	eieio();

	bdp->lstatus = lstatus;
}
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				      tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				      rx_queue->rx_ring_size, GFP_KERNEL);
		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
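
/* Note on the layout produced above: a single dma_alloc_coherent() call
 * backs every ring, with all TX descriptor rings laid out first and the
 * RX rings following directly after, e.g. with two queues of each type:
 *
 *	[ tx0 ring | tx1 ring | rx0 ring | rx1 ring ]
 *
 * Each queue records its own virtual and DMA base address into this one
 * contiguous block.
 */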
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		/* successive tbase registers are 8 bytes (two u32s) apart */
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		/* likewise for the rbase registers */
		baddr += 2;
	}
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
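
/* Background note (not from the original source): the FCB (frame control
 * block) is the small per-frame header the controller places in front of
 * frames to carry offload metadata such as checksum results, VLAN tags
 * and timestamps. That is why the three features tested above all force
 * FCB usage, and why gfar_probe() reserves GMAC_FCB_LEN of headroom when
 * TX checksumming or timestamping is enabled.
 */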
static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
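
/* A minimal sketch of how user space reaches the handler above via the
 * standard SIOCSHWTSTAMP ABI. The socket fd and interface name are made
 * up for illustration:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any rx_filter other than NONE is coerced to FILTER_ALL on
 * return, matching the default case in the switch above.
 */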
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
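
/* Worked example: bit i of the result is taken from bit (max_qs - 1 - i)
 * of the input, so reverse_bitmap(0b00000101, 8) returns 0b10100000.
 * This turns the MSB-first (q0 in the MSB) queue masks from the device
 * tree into LSB-first masks that for_each_set_bit() walks in queue order.
 */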
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
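
/* Resulting table layout, top down: the catch-all default rule sits at
 * MAX_FILER_IDX; below it, cluster_entry_per_class() has written a
 * four-entry cluster for each of the six traffic classes (IPv6,
 * IPv6|UDP, IPv6|TCP, IPv4, IPv4|UDP, IPv4|TCP); cur_filer_idx marks the
 * lowest of those entries, and every index below it holds a no-match
 * rule so the unused slots are inert.
 */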
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
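
/* Worked example of the decode above (values illustrative): with a PVR
 * of 0x80850010 and an SVR of 0x80b00021, mod = (0x80b0 & 0xfff6) =
 * 0x80b0 and rev = 0x0021, i.e. an MPC8313 at rev >= 2.0; that selects
 * GFAR_ERRATA_74 plus the two all-revision MPC8313 errata, but not
 * GFAR_ERRATA_12.
 */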
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);
	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for NAPI; we register one NAPI instance per group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers
	 */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(priv->gfargrp[i].int_name_tx, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);

	return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
		  BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally, TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
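/* Graceful-stop flow, as implemented below: set the GRS and GTS bits in
 * DMACTRL, then poll IEVENT until the hardware acknowledges with GRSC and
 * GTSC. On parts affected by erratum A002 the GRSC ack may never arrive,
 * so __gfar_is_rx_idle() above serves as the fallback exit condition.
 */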
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				(IEVENT_GRSC | IEVENT_GTSC)) ==
				(IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					 rxbdp->bufPtr, priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case -- even if we enable
	 * multiple queues, there's only a single set of
	 * registers (txic/rxic) to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				       grp->int_name_er, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptError);
			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				       0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				       grp->int_name_rx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
				       0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
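	/* Worked example (an illustration, not from the hardware manual):
	 * for an untagged IPv4 frame the FCB has already been pushed, so
	 * skb_network_offset() is ETH_HLEN + fcb_length; l3os then comes
	 * out as ETH_HLEN (14), and l4os is the IP header length (20 for
	 * a header without options).
	 */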
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}
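/* Ring-wrap example (illustrative): with ring_size = 8, advancing from
 * base + 6 by a stride of 3 lands on base + 9, which wraps back around
 * to base + 1.
 */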
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;

	/* TOE=1 frames larger than 2500 bytes may see excess delays
	 * before start of transmission.
	 */
	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
		     skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb->len > 2500)) {
		int ret;

		ret = skb_checksum_help(skb);
		if (ret)
			return ret;
	}

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* check if time stamp should be generated */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		     priv->hwts_tx_en)) {
		do_tstamp = 1;
		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
	}

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
	     vlan_tx_tag_present(skb) ||
	     unlikely(do_tstamp)) &&
	    (skb_headroom(skb) < fcb_length)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_length);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* Steal sock reference for processing TX time stamps */
		swap(skb_new->sk, skb->sk);
		swap(skb_new->destructor, skb->destructor);
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	tx_queue->stats.tx_bytes += skb->len;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   length,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		/* as specified by errata */
		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
			     ((unsigned long)fcb % 0x20) > 0x18)) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
		} else {
			lstatus |= BD_LFLAG(TXBD_TOE);
			gfar_tx_checksum(skb, fcb, fcb_length);
		}
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
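	/* Resulting layout for a time stamped frame (a sketch derived from
	 * the code below, not from documentation):
	 *   txbdp_start:  points at the FCB, data length GMAC_FCB_LEN
	 *   txbdp_tstamp: points fcb_length bytes in, i.e. at the frame
	 *                 data, length skb_headlen(skb) - fcb_length
	 */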
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_length);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, skb->len);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER)
		tempval |= RCTRL_PRSDEP_INIT;
	else
		tempval &= ~RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, tempval);
}
/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;
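	/* Round the buffer size up to the next multiple of
	 * INCREMENTAL_BUFFER_SIZE; a frame_size that is already an exact
	 * multiple still gains a full extra increment. For instance,
	 * assuming a 512-byte increment, frame_size = 1522 yields
	 * tempsize = 1536.
	 */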
	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something
	 */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long)skb->data) & (RXBUF_ALIGNMENT - 1)));
}
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	rx_queue = priv->rx_queue[tqi];
	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			/* The hardware-written timestamp is fetched from the
			 * 8-byte-aligned slot 0x10 bytes into the buffer,
			 * within the FCB/TxPAL prefix.
			 */
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
				       bdp->bufPtr,
				       bdp->length,
				       DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += skb->len;

		/* If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
		    skb_recycle_check(skb, priv->rx_buffer_size +
					   RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);

	return howmany;
}
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;
		estats->rx_trunc++;
		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, leave the
	 * checksum unset and let the stack verify it.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
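/* Receive buffer prefix, as consumed below (a sketch derived from the
 * pulls in this function): the optional FCB comes first, then
 * priv->padding bytes before the Ethernet frame; when hardware rx time
 * stamping is enabled, the first 8 of those padding bytes hold the
 * nanoseconds timestamp that is read out here.
 */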
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;
	gro_result_t ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *)skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We need to check for NETIF_F_HW_VLAN_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, fcb->vlctl);

	/* Send the packet up the stack */
	ret = napi_gro_receive(napi, skb);

	if (GRO_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
						     struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget / num_queues;
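	/* The budget is split evenly across the group's rx queues; any
	 * budget a queue leaves unused is pooled and redistributed among
	 * the still-unserviced queues on the next pass of the loop below.
	 * E.g. with a budget of 64 and two queues, each starts with 32;
	 * if queue 0 only needs 10, the remaining 22 go to queue 1 on the
	 * following iteration.
	 */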
	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget / num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
								  budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
					 rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
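/* Worked example (illustrative): with hash_width = 8, a CRC whose top
 * byte is 0xa3 gives whichreg = 0xa3 >> 5 = 5 and whichbit =
 * 0xa3 & 0x1f = 3, so bit 28 (1 << (31 - 3)) of hash_regs[5] is set.
 */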
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *)(tmpbuf)));

	tempval = *((u32 *)(tmpbuf + 4));

	gfar_write(macptr + 1, tempval);
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");
	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);