/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or an amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but it will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
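
/* (Re)initialize a single RX buffer descriptor: point it at the given DMA
 * buffer, mark it empty and interrupt-generating, and set the wrap bit if
 * it is the last descriptor in the ring. */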
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
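
/* Reset the TX and RX descriptor rings to their initial state: all TX
 * descriptors cleared, and every RX descriptor backed by an skb (reusing
 * the existing one if present, otherwise allocating a new one). */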
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
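
/* Allocate a single coherent DMA region covering all TX and RX descriptor
 * rings, carve it up among the queues, allocate the per-queue skb pointer
 * arrays, and initialize the rings. */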
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);
		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
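
/* Program the TBASEn/RBASEn registers with the DMA base address of each
 * queue's descriptor ring. */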
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
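
/* Configure MAC-level receive and transmit behaviour: ring base registers,
 * interrupt coalescing, RCTRL/TCTRL options (filing, checksumming, padding,
 * VLAN), and the stashing attributes. */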
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable)
		rctrl |= RCTRL_FILREN;

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_select_queue = gfar_select_queue,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
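
/* Take/release the per-queue spinlocks of every RX or TX queue. Callers
 * (e.g. stop_gfar, suspend/resume) disable local interrupts around these
 * to quiesce the whole device. */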
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_get_queue_mapping(skb);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}
static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
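
/* Map one interrupt group's register block, grab its IRQs from the device
 * tree, and record the RX/TX bit-maps that assign queues to this group
 * (fixed defaults apply outside multi-group mode). */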
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;
	u64 addr, size;

	addr = of_translate_address(np,
			of_get_address(np, 0, &size, NULL));
	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
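
/* Walk the controller's device-tree node: allocate the net_device, parse
 * queue counts, interrupt groups, stashing and MAC address properties, and
 * set the capability flags implied by the "model" property. */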
static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kmalloc(sizeof(struct gfar_priv_tx_q),
				GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kmalloc(sizeof(struct gfar_priv_rx_q),
				GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
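
/* Mirror the low max_qs bits of bit_map, so a map whose MSB refers to
 * queue 0 can be walked with for_each_bit (which scans LSB first). */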
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
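
/* Write a four-entry filer cluster matching one parser class (e.g.
 * IPv4 | TCP), mirroring each rule in the ftp_rqfcr/ftp_rqfpr shadow
 * tables; returns the next free filer index. */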
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
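
/* Populate the RX filer: a default match-all rule, one cluster per
 * IPv4/IPv6 x TCP/UDP class, and no-match rules for the remaining
 * entries. */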
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
				GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}
	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}
	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = '0' + i;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}
static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM
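/* Quiesce the controller for suspend: detach the interface, halt DMA, and
 * either arm Magic Packet wake-up (leaving RX enabled) or stop the PHY. */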
static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
			interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
				interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		while (!(gfar_read(&regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}
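
/*
 * A sketch of the graceful-stop handshake used above (our reading of the
 * code, not a quote from the reference manual): software sets the GRS/GTS
 * (graceful receive/transmit stop) bits in DMACTRL, the DMA engine drains
 * what it is working on, and then latches GRSC/GTSC in IEVENT to signal
 * that it has actually stopped; only then is it safe for gfar_halt() to
 * clear the Rx/Tx enables in MACCFG1.
 */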
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					 rxbdp->bufPtr, priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies;
}
void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_bit (i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_bit (i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
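
/*
 * The txic/rxic values programmed above normally originate from ethtool.
 * A typical (illustrative) invocation that ends up populating
 * rxcoalescing/rxic through the driver's ethtool_ops would be:
 *
 *	ethtool -C eth0 rx-frames 16 rx-usecs 100
 *
 * The mapping from frames/usecs to the frame-count and timer-threshold
 * fields of the register lives in the ethtool support code, not here.
 */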
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				       grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, grp->interruptError);
			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				       0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				       grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
				       0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);
	fcb->flags = flags;
}
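
/*
 * Worked example (illustrative numbers): for an untagged Ethernet II
 * IPv4/TCP frame, skb->data points at the FCB once gfar_add_fcb() has
 * run, so skb_network_offset() is ETH_HLEN + GMAC_FCB_LEN = 14 + 8 = 22,
 * giving l3os = 14; with a 20-byte IP header, l4os = 20.  The controller
 * then finds the L3 header 14 bytes past the FCB and the L4 header 20
 * bytes after that.
 */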
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
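
/*
 * Ring-wrap arithmetic, by example (values illustrative): with
 * ring_size = 8 and bdp = base + 6, skip_txbd(bdp, 3, base, 8) computes
 * new_bd = base + 9, notices it ran off the end of the ring, and returns
 * base + 1 -- the descriptor three slots ahead, modulo the ring size.
 */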
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base;
	u32 lstatus;
	int i, rq = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, length;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
	     (priv->vlgrp && vlan_tx_tag_present(skb))) &&
	    (skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* check if there is space to queue this packet */
	if ((nr_frags + 1) > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	txbdp = txbdp_start = tx_queue->cur_tx;

	if (nr_frags == 0) {
		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					       skb_shinfo(skb)->frags[i].page,
					       skb_shinfo(skb)->frags[i].page_offset,
					       length,
					       DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* setup the TxBD length and buffer pointer for the first BD */
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree.  Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_frags + 1);

	dev->trans_start = jiffies;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
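
/*
 * For a two-fragment skb, the descriptor chain built above ends up
 * looking like this (sketch; flags abbreviated, TOE only when offload
 * is in use):
 *
 *	BD n   : READY|CRC[|TOE]       headlen   -> skb->data
 *	BD n+1 : READY                 frag0.len -> fragment 0 page
 *	BD n+2 : READY|LAST|INTERRUPT  frag1.len -> fragment 1 page
 *
 * BD n is armed last, under txlock and after the eieio() barrier, so
 * the controller never sees a partially written chain.
 */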
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
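
/*
 * Sizing example (illustrative; assumes INCREMENTAL_BUFFER_SIZE is 512):
 * with new_mtu = 1500 on a VLAN-enabled, FCB-using interface and no
 * extra padding, frame_size = 1500 + 14 + 4 + 8 = 1526, so the round-up
 * above yields tempsize = (1526 & ~511) + 512 = 1536 bytes per rx buffer.
 */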
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0;
	int i;
	int howmany = 0;
	u32 lstatus;

	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;
		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		dma_unmap_single(&priv->ofdev->dev,
				 bdp->bufPtr,
				 bdp->length,
				 DMA_TO_DEVICE);

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
				       bdp->bufPtr,
				       bdp->length,
				       DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
		    skb_recycle_check(skb, priv->rx_buffer_size +
					   RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += frags + 1;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) &&
	    tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	dev->stats.tx_packets += howmany;

	return howmany;
}
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}
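
/*
 * Note the interrupt/NAPI handshake above: if no poll is pending yet,
 * RX/TX interrupts are masked (IMASK_RTX_DISABLED) before the poll is
 * scheduled, and gfar_poll() restores IMASK_DEFAULT once the rings are
 * drained.  If a poll is already pending, only IEVENT is acknowledged,
 * since the queued poll will service the new packets anyway.
 */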
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				       priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		      (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}
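
/*
 * Alignment example (illustrative; assumes RXBUF_ALIGNMENT is 64): if
 * skb->data lands at an address whose low six bits are 0x28 (offset 40),
 * alignamount = 64 - 40 = 24, and skb_reserve(skb, 24) moves ->data up
 * to the next 64-byte boundary.  gfar_new_skb() over-allocates by
 * RXBUF_ALIGNMENT bytes precisely so this reserve always fits.
 */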
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;
		estats->rx_trunc++;
		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;
	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_set_queue_mapping(skb, fcb->rq);
	/* Remove the padded bytes, if there are any */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		      priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
						     struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
								  budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
						   (budget_per_queue -
						    rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
					  gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}
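
/*
 * Budget redistribution, by example (illustrative numbers): with
 * budget = 16 and two rx queues in the group, each queue is first
 * offered 8.  If queue 0 only finds 3 frames while queue 1 uses all 8,
 * the unused 5 are folded back into left_over_budget, queue 0 is marked
 * serviced, and the next pass offers the remaining queue those 5 on top
 * of what it already consumed.
 */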
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						      mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
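/* Worked example (illustrative value): suppose the CRC result's top
 * byte is 0xd4 = 0b11010100 and hash_width is 8.  Then whichreg =
 * result >> 29 = 0b110 = 6 (use gaddr6), whichbit = (result >> 24) & 0x1f
 * = 0b10100 = 20, and the code below sets bit value 1 << (31 - 20) --
 * IBM bit 20 -- in gaddr6. */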
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
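
/*
 * Endianness example (illustrative address, assuming the big-endian
 * loads of the PowerPC systems this driver targets): for
 * 00:11:22:33:44:55, tmpbuf becomes {0x55, 0x44, 0x33, 0x22, 0x11, 0x00},
 * so macstnaddr1 is written with 0x55443322, and the two bytes read at
 * tmpbuf + 4 put 0x1100 in the upper half of the second register --
 * the reversed byte order the MAC expects.
 */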
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,
	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_legacy_suspend,
	.resume = gfar_legacy_resume,
	.driver.pm = GFAR_PM_OPS,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);