/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
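
/*
 * Illustrative sketch of the receive ring walk described above; this is
 * exposition, not driver code, and the names (EMPTY, WRAP, process, refill)
 * are placeholders rather than real identifiers:
 *
 *     bdp = cur_rx;
 *     while (!(bdp->status & EMPTY) && budget--) {
 *         process(bdp);              // pass the attached skb up the stack
 *         refill(bdp, new_skb());    // attach a freshly allocated skb
 *         bdp = (bdp->status & WRAP) ? base : bdp + 1;
 *     }
 *     cur_rx = bdp;
 */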

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
        struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
        const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
        struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static const struct net_device_ops gfar_netdev_ops = {
    .ndo_open = gfar_enet_open,
    .ndo_start_xmit = gfar_start_xmit,
    .ndo_stop = gfar_close,
    .ndo_change_mtu = gfar_change_mtu,
    .ndo_set_multicast_list = gfar_set_multi,
    .ndo_tx_timeout = gfar_timeout,
    .ndo_do_ioctl = gfar_ioctl,
    .ndo_vlan_rx_register = gfar_vlan_rx_register,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = gfar_netpoll,
#endif
};

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
    return priv->vlgrp || priv->rx_csum_enable;
}
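
/*
 * An FCB (Frame Control Block) is a GMAC_FCB_LEN-byte control header that
 * the controller prepends to each received frame when VLAN extraction or
 * RX checksumming is in use (and that the driver pushes in front of TX
 * frames for checksum offload and VLAN insertion). gfar_change_mtu() and
 * gfar_start_xmit() below account for this extra header when sizing
 * buffers and headroom.
 */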

static int gfar_of_init(struct net_device *dev)
{
    const char *model;
    const char *ctype;
    const void *mac_addr;
    u64 addr, size;
    int err = 0;
    struct gfar_private *priv = netdev_priv(dev);
    struct device_node *np = priv->node;
    const u32 *stash;
    const u32 *stash_len;
    const u32 *stash_idx;

    if (!np || !of_device_is_available(np))
        return -ENODEV;

    /* get a pointer to the register memory */
    addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
    priv->regs = ioremap(addr, size);

    if (priv->regs == NULL)
        return -ENOMEM;

    priv->interruptTransmit = irq_of_parse_and_map(np, 0);

    model = of_get_property(np, "model", NULL);

    /* If we aren't the FEC we have multiple interrupts */
    if (model && strcasecmp(model, "FEC")) {
        priv->interruptReceive = irq_of_parse_and_map(np, 1);

        priv->interruptError = irq_of_parse_and_map(np, 2);

        if (priv->interruptTransmit < 0 ||
            priv->interruptReceive < 0 ||
            priv->interruptError < 0) {
            err = -EINVAL;
            goto err_out;
        }
    }

    stash = of_get_property(np, "bd-stash", NULL);

    if (stash) {
        priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
        priv->bd_stash_en = 1;
    }

    stash_len = of_get_property(np, "rx-stash-len", NULL);

    if (stash_len)
        priv->rx_stash_size = *stash_len;

    stash_idx = of_get_property(np, "rx-stash-idx", NULL);

    if (stash_idx)
        priv->rx_stash_index = *stash_idx;

    if (stash_len || stash_idx)
        priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

    mac_addr = of_get_mac_address(np);
    if (mac_addr)
        memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

    if (model && !strcasecmp(model, "TSEC"))
        priv->device_flags =
            FSL_GIANFAR_DEV_HAS_GIGABIT |
            FSL_GIANFAR_DEV_HAS_COALESCE |
            FSL_GIANFAR_DEV_HAS_RMON |
            FSL_GIANFAR_DEV_HAS_MULTI_INTR;
    if (model && !strcasecmp(model, "eTSEC"))
        priv->device_flags =
            FSL_GIANFAR_DEV_HAS_GIGABIT |
            FSL_GIANFAR_DEV_HAS_COALESCE |
            FSL_GIANFAR_DEV_HAS_RMON |
            FSL_GIANFAR_DEV_HAS_MULTI_INTR |
            FSL_GIANFAR_DEV_HAS_PADDING |
            FSL_GIANFAR_DEV_HAS_CSUM |
            FSL_GIANFAR_DEV_HAS_VLAN |
            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
            FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

    ctype = of_get_property(np, "phy-connection-type", NULL);

    /* We only care about rgmii-id. The rest are autodetected */
    if (ctype && !strcmp(ctype, "rgmii-id"))
        priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
    else
        priv->interface = PHY_INTERFACE_MODE_MII;

    if (of_get_property(np, "fsl,magic-packet", NULL))
        priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

    priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

    /* Find the TBI PHY. If it's not there, we don't support SGMII */
    priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

    return 0;

err_out:
    iounmap(priv->regs);
    return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct gfar_private *priv = netdev_priv(dev);

    if (!netif_running(dev))
        return -EINVAL;

    if (!priv->phydev)
        return -ENODEV;

    return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
        const struct of_device_id *match)
{
    u32 tempval;
    struct net_device *dev = NULL;
    struct gfar_private *priv = NULL;
    int err = 0;
    int len_devname;

    /* Create an ethernet device instance */
    dev = alloc_etherdev(sizeof (*priv));

    if (NULL == dev)
        return -ENOMEM;

    priv = netdev_priv(dev);
    priv->ndev = dev;
    priv->ofdev = ofdev;
    priv->node = ofdev->node;
    SET_NETDEV_DEV(dev, &ofdev->dev);

    err = gfar_of_init(dev);

    if (err)
        goto regs_fail;

    spin_lock_init(&priv->txlock);
    spin_lock_init(&priv->rxlock);
    spin_lock_init(&priv->bflock);
    INIT_WORK(&priv->reset_task, gfar_reset_task);

    dev_set_drvdata(&ofdev->dev, priv);

    /* Stop the DMA engine now, in case it was running before */
    /* (The firmware could have used it, and left it running). */
    gfar_halt(dev);

    /* Reset MAC layer */
    gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

    /* We need to delay at least 3 TX clocks */
    udelay(2);
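    /* (Illustrative timing note: assuming the slowest supported rate is
     * 10 Mbit MII with a 2.5 MHz TX clock, 3 TX clocks are 1.2 us, so a
     * 2 us delay covers it; the clock figure is an assumption, not taken
     * from this file.) */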
    tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
    gfar_write(&priv->regs->maccfg1, tempval);

    /* Initialize MACCFG2. */
    gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

    /* Initialize ECNTRL */
    gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

    /* Set the dev->base_addr to the gfar reg region */
    dev->base_addr = (unsigned long) (priv->regs);

    SET_NETDEV_DEV(dev, &ofdev->dev);

    /* Fill in the dev structure */
    dev->watchdog_timeo = TX_TIMEOUT;
    netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
    dev->mtu = 1500;

    dev->netdev_ops = &gfar_netdev_ops;
    dev->ethtool_ops = &gfar_ethtool_ops;

    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
        priv->rx_csum_enable = 1;
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
    } else
        priv->rx_csum_enable = 0;

    priv->vlgrp = NULL;

    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
        priv->extended_hash = 1;
        priv->hash_width = 9;

        priv->hash_regs[0] = &priv->regs->igaddr0;
        priv->hash_regs[1] = &priv->regs->igaddr1;
        priv->hash_regs[2] = &priv->regs->igaddr2;
        priv->hash_regs[3] = &priv->regs->igaddr3;
        priv->hash_regs[4] = &priv->regs->igaddr4;
        priv->hash_regs[5] = &priv->regs->igaddr5;
        priv->hash_regs[6] = &priv->regs->igaddr6;
        priv->hash_regs[7] = &priv->regs->igaddr7;
        priv->hash_regs[8] = &priv->regs->gaddr0;
        priv->hash_regs[9] = &priv->regs->gaddr1;
        priv->hash_regs[10] = &priv->regs->gaddr2;
        priv->hash_regs[11] = &priv->regs->gaddr3;
        priv->hash_regs[12] = &priv->regs->gaddr4;
        priv->hash_regs[13] = &priv->regs->gaddr5;
        priv->hash_regs[14] = &priv->regs->gaddr6;
        priv->hash_regs[15] = &priv->regs->gaddr7;
    } else {
        priv->extended_hash = 0;
        priv->hash_width = 8;

        priv->hash_regs[0] = &priv->regs->gaddr0;
        priv->hash_regs[1] = &priv->regs->gaddr1;
        priv->hash_regs[2] = &priv->regs->gaddr2;
        priv->hash_regs[3] = &priv->regs->gaddr3;
        priv->hash_regs[4] = &priv->regs->gaddr4;
        priv->hash_regs[5] = &priv->regs->gaddr5;
        priv->hash_regs[6] = &priv->regs->gaddr6;
        priv->hash_regs[7] = &priv->regs->gaddr7;
    }

    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
        priv->padding = DEFAULT_PADDING;
    else
        priv->padding = 0;

    if (dev->features & NETIF_F_IP_CSUM)
        dev->hard_header_len += GMAC_FCB_LEN;

    priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
    priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
    priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
    priv->num_txbdfree = DEFAULT_TX_RING_SIZE;

    priv->txcoalescing = DEFAULT_TX_COALESCE;
    priv->txic = DEFAULT_TXIC;
    priv->rxcoalescing = DEFAULT_RX_COALESCE;
    priv->rxic = DEFAULT_RXIC;

    /* Enable most messages by default */
    priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
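    /* ((NETIF_MSG_IFUP << 1) - 1 sets every message bit up to and
     * including NETIF_MSG_IFUP; assuming the standard netif_msg bit
     * values this is 0x3f, i.e. DRV, PROBE, LINK, TIMER, IFDOWN, IFUP.) */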
    /* Carrier starts down, phylib will bring it up */
    netif_carrier_off(dev);

    err = register_netdev(dev);

    if (err) {
        printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                dev->name);
        goto register_fail;
    }

    device_init_wakeup(&dev->dev,
        priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

    /* fill out IRQ number and name fields */
    len_devname = strlen(dev->name);
    strncpy(&priv->int_name_tx[0], dev->name, len_devname);
    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
        strncpy(&priv->int_name_tx[len_devname],
            "_tx", sizeof("_tx") + 1);

        strncpy(&priv->int_name_rx[0], dev->name, len_devname);
        strncpy(&priv->int_name_rx[len_devname],
            "_rx", sizeof("_rx") + 1);

        strncpy(&priv->int_name_er[0], dev->name, len_devname);
        strncpy(&priv->int_name_er[len_devname],
            "_er", sizeof("_er") + 1);
    } else
        priv->int_name_tx[len_devname] = '\0';

    /* Create all the sysfs files */
    gfar_init_sysfs(dev);

    /* Print out the device info */
    printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

    /* Even more device info helps when determining which kernel */
    /* provided which set of benchmarks. */
    printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
    printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
            dev->name, priv->rx_ring_size, priv->tx_ring_size);

    return 0;

register_fail:
    iounmap(priv->regs);
regs_fail:
    if (priv->phy_node)
        of_node_put(priv->phy_node);
    if (priv->tbi_node)
        of_node_put(priv->tbi_node);
    free_netdev(dev);
    return err;
}

static int gfar_remove(struct of_device *ofdev)
{
    struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

    if (priv->phy_node)
        of_node_put(priv->phy_node);
    if (priv->tbi_node)
        of_node_put(priv->tbi_node);

    dev_set_drvdata(&ofdev->dev, NULL);

    unregister_netdev(priv->ndev);
    iounmap(priv->regs);
    free_netdev(priv->ndev);

    return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
{
    struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
    struct net_device *dev = priv->ndev;
    unsigned long flags;
    u32 tempval;
    int magic_packet = priv->wol_en &&
        (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

    netif_device_detach(dev);

    if (netif_running(dev)) {
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt_nodisable(dev);

        /* Disable Tx, and Rx if wake-on-LAN is disabled. */
        tempval = gfar_read(&priv->regs->maccfg1);

        tempval &= ~MACCFG1_TX_EN;

        if (!magic_packet)
            tempval &= ~MACCFG1_RX_EN;

        gfar_write(&priv->regs->maccfg1, tempval);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        napi_disable(&priv->napi);

        if (magic_packet) {
            /* Enable interrupt on Magic Packet */
            gfar_write(&priv->regs->imask, IMASK_MAG);

            /* Enable Magic Packet mode */
            tempval = gfar_read(&priv->regs->maccfg2);
            tempval |= MACCFG2_MPEN;
            gfar_write(&priv->regs->maccfg2, tempval);
        } else {
            phy_stop(priv->phydev);
        }
    }

    return 0;
}

static int gfar_resume(struct of_device *ofdev)
{
    struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
    struct net_device *dev = priv->ndev;
    unsigned long flags;
    u32 tempval;
    int magic_packet = priv->wol_en &&
        (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

    if (!netif_running(dev)) {
        netif_device_attach(dev);
        return 0;
    }

    if (!magic_packet && priv->phydev)
        phy_start(priv->phydev);

    /* Disable Magic Packet mode, in case something
     * else woke us up.
     */
    spin_lock_irqsave(&priv->txlock, flags);
    spin_lock(&priv->rxlock);

    tempval = gfar_read(&priv->regs->maccfg2);
    tempval &= ~MACCFG2_MPEN;
    gfar_write(&priv->regs->maccfg2, tempval);

    gfar_start(dev);

    spin_unlock(&priv->rxlock);
    spin_unlock_irqrestore(&priv->txlock, flags);

    netif_device_attach(dev);

    napi_enable(&priv->napi);

    return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    u32 ecntrl = gfar_read(&priv->regs->ecntrl);

    if (ecntrl & ECNTRL_SGMII_MODE)
        return PHY_INTERFACE_MODE_SGMII;

    if (ecntrl & ECNTRL_TBI_MODE) {
        if (ecntrl & ECNTRL_REDUCED_MODE)
            return PHY_INTERFACE_MODE_RTBI;
        else
            return PHY_INTERFACE_MODE_TBI;
    }

    if (ecntrl & ECNTRL_REDUCED_MODE) {
        if (ecntrl & ECNTRL_REDUCED_MII_MODE)
            return PHY_INTERFACE_MODE_RMII;
        else {
            phy_interface_t interface = priv->interface;

            /*
             * This isn't autodetected right now, so it must
             * be set by the device tree or platform code.
             */
            if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                return PHY_INTERFACE_MODE_RGMII_ID;

            return PHY_INTERFACE_MODE_RGMII;
        }
    }

    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
        return PHY_INTERFACE_MODE_GMII;

    return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    uint gigabit_support =
        priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
        SUPPORTED_1000baseT_Full : 0;
    phy_interface_t interface;

    priv->oldlink = 0;
    priv->oldspeed = 0;
    priv->oldduplex = -1;

    interface = gfar_get_interface(dev);

    priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
            interface);
    if (!priv->phydev)
        priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
                interface);
    if (!priv->phydev) {
        dev_err(&dev->dev, "could not attach to PHY\n");
        return -ENODEV;
    }

    if (interface == PHY_INTERFACE_MODE_SGMII)
        gfar_configure_serdes(dev);

    /* Remove any features not supported by the controller */
    priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
    priv->phydev->advertising = priv->phydev->supported;

    return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    struct phy_device *tbiphy;

    if (!priv->tbi_node) {
        dev_warn(&dev->dev, "error: SGMII mode requires that the "
                "device tree specify a tbi-handle\n");
        return;
    }

    tbiphy = of_phy_find_device(priv->tbi_node);
    if (!tbiphy) {
        dev_err(&dev->dev, "error: Could not get TBI device\n");
        return;
    }

    /*
     * If the link is already up, we must already be ok, and don't need to
     * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
     * everything for us? Resetting it takes the link down and requires
     * several seconds for it to come back.
     */
    if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
        return;

    /* Single clk mode, mii mode off (for serdes communication) */
    phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

    phy_write(tbiphy, MII_ADVERTISE,
            ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
            ADVERTISE_1000XPSE_ASYM);

    phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
            BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);

    /* Clear IEVENT */
    gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

    /* Initialize IMASK */
    gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

    /* Init hash registers to zero */
    gfar_write(&priv->regs->igaddr0, 0);
    gfar_write(&priv->regs->igaddr1, 0);
    gfar_write(&priv->regs->igaddr2, 0);
    gfar_write(&priv->regs->igaddr3, 0);
    gfar_write(&priv->regs->igaddr4, 0);
    gfar_write(&priv->regs->igaddr5, 0);
    gfar_write(&priv->regs->igaddr6, 0);
    gfar_write(&priv->regs->igaddr7, 0);

    gfar_write(&priv->regs->gaddr0, 0);
    gfar_write(&priv->regs->gaddr1, 0);
    gfar_write(&priv->regs->gaddr2, 0);
    gfar_write(&priv->regs->gaddr3, 0);
    gfar_write(&priv->regs->gaddr4, 0);
    gfar_write(&priv->regs->gaddr5, 0);
    gfar_write(&priv->regs->gaddr6, 0);
    gfar_write(&priv->regs->gaddr7, 0);

    /* Zero out the rmon mib registers if it has them */
    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
        memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

        /* Mask off the CAM interrupts */
        gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
        gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
    }

    /* Initialize the max receive buffer length */
    gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

    /* Initialize the Minimum Frame Length Register */
    gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    struct gfar __iomem *regs = priv->regs;
    u32 tempval;

    /* Mask all interrupts */
    gfar_write(&regs->imask, IMASK_INIT_CLEAR);

    /* Clear all interrupts */
    gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

    /* Stop the DMA, and wait for it to stop */
    tempval = gfar_read(&priv->regs->dmactrl);
    if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
        != (DMACTRL_GRS | DMACTRL_GTS)) {
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        while (!(gfar_read(&priv->regs->ievent) &
                (IEVENT_GRSC | IEVENT_GTSC)))
            cpu_relax();
    }
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    struct gfar __iomem *regs = priv->regs;
    u32 tempval;

    gfar_halt_nodisable(dev);

    /* Disable Rx and Tx */
    tempval = gfar_read(&regs->maccfg1);
    tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
    gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    unsigned long flags;

    phy_stop(priv->phydev);

    /* Lock it down */
    spin_lock_irqsave(&priv->txlock, flags);
    spin_lock(&priv->rxlock);

    gfar_halt(dev);

    spin_unlock(&priv->rxlock);
    spin_unlock_irqrestore(&priv->txlock, flags);

    /* Free the IRQs */
    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
        free_irq(priv->interruptError, dev);
        free_irq(priv->interruptTransmit, dev);
        free_irq(priv->interruptReceive, dev);
    } else {
        free_irq(priv->interruptTransmit, dev);
    }

    free_skb_resources(priv);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
    struct device *dev = &priv->ofdev->dev;
    struct rxbd8 *rxbdp;
    struct txbd8 *txbdp;
    int i, j;

    /* Go through all the buffer descriptors and free their data buffers */
    txbdp = priv->tx_bd_base;

    if (!priv->tx_skbuff)
        goto skip_tx_skbuff;

    for (i = 0; i < priv->tx_ring_size; i++) {
        if (!priv->tx_skbuff[i])
            continue;

        dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
                txbdp->length, DMA_TO_DEVICE);
        txbdp->lstatus = 0;
        for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
            txbdp++;
            dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
                    txbdp->length, DMA_TO_DEVICE);
        }
        txbdp++;
        dev_kfree_skb_any(priv->tx_skbuff[i]);
        priv->tx_skbuff[i] = NULL;
    }

    kfree(priv->tx_skbuff);
skip_tx_skbuff:

    rxbdp = priv->rx_bd_base;

    if (!priv->rx_skbuff)
        goto skip_rx_skbuff;

    for (i = 0; i < priv->rx_ring_size; i++) {
        if (priv->rx_skbuff[i]) {
            dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
                    priv->rx_buffer_size,
                    DMA_FROM_DEVICE);
            dev_kfree_skb_any(priv->rx_skbuff[i]);
            priv->rx_skbuff[i] = NULL;
        }

        rxbdp->lstatus = 0;
        rxbdp->bufPtr = 0;
        rxbdp++;
    }

    kfree(priv->rx_skbuff);
skip_rx_skbuff:

    dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
            sizeof(*rxbdp) * priv->rx_ring_size,
            priv->tx_bd_base, gfar_read(&priv->regs->tbase0));
}

void gfar_start(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    struct gfar __iomem *regs = priv->regs;
    u32 tempval;

    /* Enable Rx and Tx in MACCFG1 */
    tempval = gfar_read(&regs->maccfg1);
    tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
    gfar_write(&regs->maccfg1, tempval);

    /* Initialize DMACTRL to have WWR and WOP */
    tempval = gfar_read(&priv->regs->dmactrl);
    tempval |= DMACTRL_INIT_SETTINGS;
    gfar_write(&priv->regs->dmactrl, tempval);

    /* Make sure we aren't stopped */
    tempval = gfar_read(&priv->regs->dmactrl);
    tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
    gfar_write(&priv->regs->dmactrl, tempval);

    /* Clear THLT/RHLT, so that the DMA starts polling now */
    gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
    gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

    /* Unmask the interrupts we look for */
    gfar_write(&regs->imask, IMASK_DEFAULT);

    dev->trans_start = jiffies;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
    struct txbd8 *txbdp;
    struct rxbd8 *rxbdp;
    dma_addr_t addr = 0;
    void *vaddr;
    int i;
    struct gfar_private *priv = netdev_priv(ndev);
    struct device *dev = &priv->ofdev->dev;
    struct gfar __iomem *regs = priv->regs;
    int err;
    u32 rctrl = 0;
    u32 tctrl = 0;
    u32 attrs = 0;

    gfar_write(&regs->imask, IMASK_INIT_CLEAR);

    /* Allocate memory for the buffer descriptors */
    vaddr = dma_alloc_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
                sizeof(*rxbdp) * priv->rx_ring_size,
                &addr, GFP_KERNEL);
    if (!vaddr) {
        if (netif_msg_ifup(priv))
            pr_err("%s: Could not allocate buffer descriptors!\n",
                    ndev->name);
        return -ENOMEM;
    }

    priv->tx_bd_base = vaddr;

    /* enet DMA only understands physical addresses */
    gfar_write(&regs->tbase0, addr);

    /* Start the rx descriptor ring where the tx ring leaves off */
    addr = addr + sizeof(*txbdp) * priv->tx_ring_size;
    vaddr = vaddr + sizeof(*txbdp) * priv->tx_ring_size;
    priv->rx_bd_base = vaddr;
    gfar_write(&regs->rbase0, addr);

    /* Setup the skbuff rings */
    priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
                priv->tx_ring_size, GFP_KERNEL);
    if (!priv->tx_skbuff) {
        if (netif_msg_ifup(priv))
            pr_err("%s: Could not allocate tx_skbuff\n",
                    ndev->name);
        err = -ENOMEM;
        goto tx_skb_fail;
    }

    for (i = 0; i < priv->tx_ring_size; i++)
        priv->tx_skbuff[i] = NULL;

    priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
                priv->rx_ring_size, GFP_KERNEL);
    if (!priv->rx_skbuff) {
        if (netif_msg_ifup(priv))
            pr_err("%s: Could not allocate rx_skbuff\n",
                    ndev->name);
        err = -ENOMEM;
        goto rx_skb_fail;
    }

    for (i = 0; i < priv->rx_ring_size; i++)
        priv->rx_skbuff[i] = NULL;

    /* Initialize some variables in our dev structure */
    priv->num_txbdfree = priv->tx_ring_size;
    priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
    priv->cur_rx = priv->rx_bd_base;
    priv->skb_curtx = priv->skb_dirtytx = 0;
    priv->skb_currx = 0;

    /* Initialize Transmit Descriptor Ring */
    txbdp = priv->tx_bd_base;
    for (i = 0; i < priv->tx_ring_size; i++) {
        txbdp->lstatus = 0;
        txbdp->bufPtr = 0;
        txbdp++;
    }

    /* Set the last descriptor in the ring to indicate wrap */
    txbdp--;
    txbdp->status |= TXBD_WRAP;

    rxbdp = priv->rx_bd_base;
    for (i = 0; i < priv->rx_ring_size; i++) {
        struct sk_buff *skb;

        skb = gfar_new_skb(ndev);
        if (!skb) {
            pr_err("%s: Can't allocate RX buffers\n", ndev->name);
            err = -ENOMEM;
            goto err_rxalloc_fail;
        }

        priv->rx_skbuff[i] = skb;

        gfar_new_rxbdp(ndev, rxbdp, skb);

        rxbdp++;
    }

    /* If the device has multiple interrupts, register for
     * them. Otherwise, only register for the one */
    if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
        /* Install our interrupt handlers for Error,
         * Transmit, and Receive */
        err = request_irq(priv->interruptError, gfar_error, 0,
                priv->int_name_er, ndev);
        if (err) {
            if (netif_msg_intr(priv))
                pr_err("%s: Can't get IRQ %d\n", ndev->name,
                        priv->interruptError);
            goto err_irq_fail;
        }

        err = request_irq(priv->interruptTransmit, gfar_transmit, 0,
                priv->int_name_tx, ndev);
        if (err) {
            if (netif_msg_intr(priv))
                pr_err("%s: Can't get IRQ %d\n", ndev->name,
                        priv->interruptTransmit);
            goto tx_irq_fail;
        }

        err = request_irq(priv->interruptReceive, gfar_receive, 0,
                priv->int_name_rx, ndev);
        if (err) {
            if (netif_msg_intr(priv))
                pr_err("%s: Can't get IRQ %d (receive0)\n",
                        ndev->name, priv->interruptReceive);
            goto rx_irq_fail;
        }
    } else {
        err = request_irq(priv->interruptTransmit, gfar_interrupt,
                0, priv->int_name_tx, ndev);
        if (err) {
            if (netif_msg_intr(priv))
                pr_err("%s: Can't get IRQ %d\n", ndev->name,
                        priv->interruptTransmit);
            goto err_irq_fail;
        }
    }

    phy_start(priv->phydev);

    /* Configure the coalescing support */
    gfar_write(&regs->txic, 0);
    if (priv->txcoalescing)
        gfar_write(&regs->txic, priv->txic);

    gfar_write(&regs->rxic, 0);
    if (priv->rxcoalescing)
        gfar_write(&regs->rxic, priv->rxic);

    if (priv->rx_csum_enable)
        rctrl |= RCTRL_CHECKSUMMING;

    if (priv->extended_hash) {
        rctrl |= RCTRL_EXTHASH;

        gfar_clear_exact_match(ndev);
        rctrl |= RCTRL_EMEN;
    }

    if (priv->padding) {
        rctrl &= ~RCTRL_PAL_MASK;
        rctrl |= RCTRL_PADDING(priv->padding);
    }

    /* keep vlan related bits if it's enabled */
    if (priv->vlgrp) {
        rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
        tctrl |= TCTRL_VLINS;
    }

    /* Init rctrl based on our settings */
    gfar_write(&regs->rctrl, rctrl);

    if (ndev->features & NETIF_F_IP_CSUM)
        tctrl |= TCTRL_INIT_CSUM;

    gfar_write(&regs->tctrl, tctrl);

    /* Set the extraction length and index */
    attrs = ATTRELI_EL(priv->rx_stash_size) |
        ATTRELI_EI(priv->rx_stash_index);

    gfar_write(&regs->attreli, attrs);

    /* Start with defaults, and add stashing or locking
     * depending on the appropriate variables */
    attrs = ATTR_INIT_SETTINGS;

    if (priv->bd_stash_en)
        attrs |= ATTR_BDSTASH;

    if (priv->rx_stash_size != 0)
        attrs |= ATTR_BUFSTASH;

    gfar_write(&regs->attr, attrs);

    gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
    gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
    gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

    /* Start the controller */
    gfar_start(ndev);

    return 0;

rx_irq_fail:
    free_irq(priv->interruptTransmit, ndev);
tx_irq_fail:
    free_irq(priv->interruptError, ndev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
tx_skb_fail:
    free_skb_resources(priv);
    return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    int err;

    napi_enable(&priv->napi);

    skb_queue_head_init(&priv->rx_recycle);

    /* Initialize a bunch of registers */
    init_registers(dev);

    gfar_set_mac_address(dev);

    err = init_phy(dev);

    if (err) {
        napi_disable(&priv->napi);
        return err;
    }

    err = startup_gfar(dev);
    if (err) {
        napi_disable(&priv->napi);
        return err;
    }

    netif_start_queue(dev);

    device_set_wakeup_enable(&dev->dev, priv->wol_en);

    return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
    struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

    memset(fcb, 0, GMAC_FCB_LEN);

    return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
    u8 flags = 0;

    /* If we're here, it's an IP packet with a TCP or UDP
     * payload. We set it to checksum, using a pseudo-header
     * we provide
     */
    flags = TXFCB_DEFAULT;

    /* Tell the controller what the protocol is */
    /* And provide the already calculated phcs */
    if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
        flags |= TXFCB_UDP;
        fcb->phcs = udp_hdr(skb)->check;
    } else
        fcb->phcs = tcp_hdr(skb)->check;

    /* l3os is the distance between the start of the
     * frame (skb->data) and the start of the IP hdr.
     * l4os is the distance between the start of the
     * l3 hdr and the l4 hdr */
    fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
    fcb->l4os = skb_network_header_len(skb);

    fcb->flags = flags;
}
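
/*
 * Worked example for the offsets above (illustrative, assuming GMAC_FCB_LEN
 * is 8 as defined in gianfar.h): for an untagged Ethernet + IPv4 + TCP frame
 * after the FCB has been pushed, skb->data points at the FCB, so
 * skb_network_offset() is 8 + 14 = 22 and l3os = 22 - 8 = 14 (the Ethernet
 * header). For an IPv4 header with no options, l4os is 20.
 */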

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
    fcb->flags |= TXFCB_VLN;
    fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
        struct txbd8 *base, int ring_size)
{
    struct txbd8 *new_bd = bdp + stride;

    return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
        int ring_size)
{
    return skip_txbd(bdp, 1, base, ring_size);
}
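
/*
 * Example of the wrap arithmetic (illustrative values): with a ring of
 * ring_size = 8 descriptors starting at base, skip_txbd(base + 6, 3, base, 8)
 * computes base + 9, one past the end, and wraps it to base + 1. The single
 * subtraction assumes stride <= ring_size, which holds for the per-skb
 * fragment counts this driver passes in.
 */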

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);
    struct txfcb *fcb = NULL;
    struct txbd8 *txbdp, *txbdp_start, *base;
    u32 lstatus;
    int i;
    u32 bufaddr;
    unsigned long flags;
    unsigned int nr_frags, length;

    base = priv->tx_bd_base;

    /* make space for additional header when fcb is needed */
    if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
            (priv->vlgrp && vlan_tx_tag_present(skb))) &&
            (skb_headroom(skb) < GMAC_FCB_LEN)) {
        struct sk_buff *skb_new;

        skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
        if (!skb_new) {
            dev->stats.tx_errors++;
            kfree_skb(skb);
            return NETDEV_TX_OK;
        }
        kfree_skb(skb);
        skb = skb_new;
    }

    /* total number of fragments in the SKB */
    nr_frags = skb_shinfo(skb)->nr_frags;

    spin_lock_irqsave(&priv->txlock, flags);

    /* check if there is space to queue this packet */
    if ((nr_frags+1) > priv->num_txbdfree) {
        /* no space, stop the queue */
        netif_stop_queue(dev);
        dev->stats.tx_fifo_errors++;
        spin_unlock_irqrestore(&priv->txlock, flags);
        return NETDEV_TX_BUSY;
    }

    /* Update transmit stats */
    dev->stats.tx_bytes += skb->len;

    txbdp = txbdp_start = priv->cur_tx;

    if (nr_frags == 0) {
        lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
    } else {
        /* Place the fragment addresses and lengths into the TxBDs */
        for (i = 0; i < nr_frags; i++) {
            /* Point at the next BD, wrapping as needed */
            txbdp = next_txbd(txbdp, base, priv->tx_ring_size);

            length = skb_shinfo(skb)->frags[i].size;

            lstatus = txbdp->lstatus | length |
                BD_LFLAG(TXBD_READY);

            /* Handle the last BD specially */
            if (i == nr_frags - 1)
                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

            bufaddr = dma_map_page(&priv->ofdev->dev,
                    skb_shinfo(skb)->frags[i].page,
                    skb_shinfo(skb)->frags[i].page_offset,
                    length,
                    DMA_TO_DEVICE);

            /* set the TxBD length and buffer pointer */
            txbdp->bufPtr = bufaddr;
            txbdp->lstatus = lstatus;
        }

        lstatus = txbdp_start->lstatus;
    }

    /* Set up checksumming */
    if (CHECKSUM_PARTIAL == skb->ip_summed) {
        fcb = gfar_add_fcb(skb);
        lstatus |= BD_LFLAG(TXBD_TOE);
        gfar_tx_checksum(skb, fcb);
    }

    if (priv->vlgrp && vlan_tx_tag_present(skb)) {
        if (unlikely(NULL == fcb)) {
            fcb = gfar_add_fcb(skb);
            lstatus |= BD_LFLAG(TXBD_TOE);
        }

        gfar_tx_vlan(skb, fcb);
    }

    /* setup the TxBD length and buffer pointer for the first BD */
    priv->tx_skbuff[priv->skb_curtx] = skb;
    txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
            skb_headlen(skb), DMA_TO_DEVICE);

    lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

    /*
     * The powerpc-specific eieio() is used, as wmb() has too strong
     * semantics (it requires synchronization between cacheable and
     * uncacheable mappings, which eieio doesn't provide and which we
     * don't need), thus requiring a more expensive sync instruction. At
     * some point, the set of architecture-independent barrier functions
     * should be expanded to include weaker barriers.
     */
    eieio();

    txbdp_start->lstatus = lstatus;

    /* Update the current skb pointer to the next entry we will use
     * (wrapping if necessary) */
    priv->skb_curtx = (priv->skb_curtx + 1) &
        TX_RING_MOD_MASK(priv->tx_ring_size);
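    /* (Illustrative, assuming TX_RING_MOD_MASK(size) expands to (size - 1)
     * for a power-of-two ring: with tx_ring_size = 256, index 255 advances
     * to (255 + 1) & 255 = 0, wrapping to the start of the ring.) */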
    priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);

    /* reduce TxBD free count */
    priv->num_txbdfree -= (nr_frags + 1);

    dev->trans_start = jiffies;

    /* If the next BD still needs to be cleaned up, then the bds
       are full. We need to tell the kernel to stop sending us stuff. */
    if (!priv->num_txbdfree) {
        netif_stop_queue(dev);

        dev->stats.tx_fifo_errors++;
    }

    /* Tell the DMA to go go go */
    gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

    /* Unlock priv */
    spin_unlock_irqrestore(&priv->txlock, flags);

    return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);

    napi_disable(&priv->napi);

    skb_queue_purge(&priv->rx_recycle);
    cancel_work_sync(&priv->reset_task);
    stop_gfar(dev);

    /* Disconnect from the PHY */
    phy_disconnect(priv->phydev);
    priv->phydev = NULL;

    netif_stop_queue(dev);

    return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
    gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

    return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
        struct vlan_group *grp)
{
    struct gfar_private *priv = netdev_priv(dev);
    unsigned long flags;
    u32 tempval;

    spin_lock_irqsave(&priv->rxlock, flags);

    priv->vlgrp = grp;

    if (grp) {
        /* Enable VLAN tag insertion */
        tempval = gfar_read(&priv->regs->tctrl);
        tempval |= TCTRL_VLINS;

        gfar_write(&priv->regs->tctrl, tempval);

        /* Enable VLAN tag extraction */
        tempval = gfar_read(&priv->regs->rctrl);
        tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
        gfar_write(&priv->regs->rctrl, tempval);
    } else {
        /* Disable VLAN tag insertion */
        tempval = gfar_read(&priv->regs->tctrl);
        tempval &= ~TCTRL_VLINS;
        gfar_write(&priv->regs->tctrl, tempval);

        /* Disable VLAN tag extraction */
        tempval = gfar_read(&priv->regs->rctrl);
        tempval &= ~RCTRL_VLEX;
        /* If parse is no longer required, then disable parser */
        if (tempval & RCTRL_REQ_PARSER)
            tempval |= RCTRL_PRSDEP_INIT;
        else
            tempval &= ~RCTRL_PRSDEP_INIT;
        gfar_write(&priv->regs->rctrl, tempval);
    }

    gfar_change_mtu(dev, dev->mtu);

    spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
    int tempsize, tempval;
    struct gfar_private *priv = netdev_priv(dev);
    int oldsize = priv->rx_buffer_size;
    int frame_size = new_mtu + ETH_HLEN;

    if (priv->vlgrp)
        frame_size += VLAN_HLEN;

    if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
        if (netif_msg_drv(priv))
            printk(KERN_ERR "%s: Invalid MTU setting\n",
                    dev->name);
        return -EINVAL;
    }

    if (gfar_uses_fcb(priv))
        frame_size += GMAC_FCB_LEN;

    frame_size += priv->padding;

    tempsize =
        (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
        INCREMENTAL_BUFFER_SIZE;
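    /* Worked example (illustrative, assuming INCREMENTAL_BUFFER_SIZE is 512
     * as defined in gianfar.h): for new_mtu = 1500 with RX checksumming on,
     * no VLAN group and no padding, frame_size = 1500 + 14 + 8 = 1522, so
     * tempsize = (1522 & ~511) + 512 = 1024 + 512 = 1536: the buffer size
     * rounds up to the next 512-byte step. */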

    /* Only stop and start the controller if it isn't already
     * stopped, and we changed something */
    if ((oldsize != tempsize) && (dev->flags & IFF_UP))
        stop_gfar(dev);

    priv->rx_buffer_size = tempsize;

    dev->mtu = new_mtu;

    gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
    gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

    /* If the mtu is larger than the max size for standard
     * ethernet frames (ie, a jumbo frame), then set maccfg2
     * to allow huge frames, and to check the length */
    tempval = gfar_read(&priv->regs->maccfg2);

    if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
        tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
    else
        tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

    gfar_write(&priv->regs->maccfg2, tempval);

    if ((oldsize != tempsize) && (dev->flags & IFF_UP))
        startup_gfar(dev);

    return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
    struct gfar_private *priv = container_of(work, struct gfar_private,
            reset_task);
    struct net_device *dev = priv->ndev;

    if (dev->flags & IFF_UP) {
        netif_stop_queue(dev);
        stop_gfar(dev);
        startup_gfar(dev);
        netif_start_queue(dev);
    }

    netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
    struct gfar_private *priv = netdev_priv(dev);

    dev->stats.tx_errors++;
    schedule_work(&priv->reset_task);
}

/* Reclaim completed transmit descriptors, recycling or freeing
 * their skbs, and wake the queue if descriptors were freed */
static int gfar_clean_tx_ring(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = priv->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = priv->tx_ring_size;
	int frags = 0;
	int i;
	int howmany = 0;
	u32 lstatus;

	bdp = priv->dirty_tx;
	skb_dirtytx = priv->skb_dirtytx;

	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
		frags = skb_shinfo(skb)->nr_frags;
		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		dma_unmap_single(&priv->ofdev->dev,
				bdp->bufPtr,
				bdp->length,
				DMA_TO_DEVICE);

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		priv->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		priv->num_txbdfree += frags + 1;
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_queue_stopped(dev) && priv->num_txbdfree)
		netif_wake_queue(dev);

	/* Update dirty indicators */
	priv->skb_dirtytx = skb_dirtytx;
	priv->dirty_tx = bdp;

	dev->stats.tx_packets += howmany;

	return howmany;
}
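
/* Mask the rx/tx interrupts and hand the ring cleanup to the NAPI
 * poll routine.  If polling is already scheduled, just ack IEVENT
 * so the interrupt doesn't refire for frames already queued. */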
static void gfar_schedule_cleanup(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	if (napi_schedule_prep(&priv->napi)) {
		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&priv->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
	}

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	gfar_schedule_cleanup((struct net_device *)dev_id);
	return IRQ_HANDLED;
}
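
/* DMA-map a fresh receive buffer and initialize its descriptor as
 * empty; the last descriptor in the ring also gets the WRAP flag
 * so the controller loops back to the base of the ring. */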
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 lstatus;

	bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
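
/* Allocate an rx skb, preferring the recycle pool, sized and then
 * reserved so its data pointer falls on an RXBUF_ALIGNMENT
 * boundary. */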
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}
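
/* Tally rx error statistics from a receive descriptor's status
 * bits; a truncated frame counts only as a length error. */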
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;
		estats->rx_trunc++;
		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_over_errors++;
	}
}
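
/* Interrupt Handler for Receive complete */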
irqreturn_t gfar_receive(int irq, void *dev_id)
{
	gfar_schedule_cleanup((struct net_device *)dev_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;
	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (ret == NET_RX_DROP)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;
	base = priv->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				if (in_irq() || irqs_disabled())
					printk(KERN_ERR "Interrupt problem!\n");
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
						"%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, priv->rx_ring_size);

		/* update to point at the next skb */
		priv->skb_currx =
			(priv->skb_currx + 1) &
			RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}
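
/* NAPI poll routine: reclaim tx descriptors if the tx lock is
 * uncontended, clean the rx ring up to the budget, and re-enable
 * interrupts (and restart coalescing timers) once caught up. */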
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->ndev;
	int tx_cleaned = 0;
	int rx_cleaned = 0;
	unsigned long flags;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		tx_cleaned = gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	rx_cleaned = gfar_clean_rx_ring(dev, budget);

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic, priv->rxic);
		}
		if (likely(priv->txcoalescing)) {
			gfar_write(&priv->regs->txic, 0);
			gfar_write(&priv->regs->txic, priv->txic);
		}
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_1000:
				tempval =
					((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				ecntrl &= ~(ECNTRL_R100);
				break;
			case SPEED_100:
			case SPEED_10:
				tempval =
					((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index determine which gaddr register to use, and the 5
 * other bits indicate which bit (assuming an IBM numbering scheme,
 * which for PowerPC (tm) is usually the case) in the register
 * holds the entry. */
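/* Illustrative example, assuming a hash_width of 8: a bit-reversed
 * CRC of 0xb2xxxxxx gives hash index 0xb2.  The top 3 bits
 * (0b101 = 5) select hash_regs[5], and the low 5 bits
 * (0b10010 = 18) select bit 18 in IBM numbering, i.e. the mask
 * 1 << (31 - 18). */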
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the error event for debugging */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
				dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
						"packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
					dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{},
};

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);