/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_PPC_CPM_NEW_BINDING
#include <linux/of_gpio.h>
#include <asm/of_platform.h>
#endif

#include "fs_enet.h"

/*************************************************/

#ifndef CONFIG_PPC_CPM_NEW_BINDING
static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
#endif

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}
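/*
 * Worked example for skb_align() above (numbers illustrative, not from
 * this driver): with align == 16 and skb->data at an address ending in
 * 0x0a, off == 10, so skb_reserve(skb, 6) advances skb->data to the
 * next 16-byte boundary before the buffer is mapped for DMA.
 * ENET_RX_ALIGN (from fs_enet.h) supplies the alignment actually used
 * by the receive paths below.
 */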
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_fifo_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;
		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		netif_rx_complete(dev, napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
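/*
 * Note on the copybreak logic above (duplicated in the non-NAPI receive
 * path below): frames no longer than fpi->rx_copybreak are copied into
 * a small freshly allocated skb so the large, cache-aligned ring buffer
 * can be recycled in place; longer frames are handed up as-is and a new
 * ring buffer is allocated instead. As an illustration, with
 * rx_copybreak == 240 (the value set in the new-binding fs_enet_probe()),
 * a 64-byte ARP reply is copied while a 1500-byte TCP segment is passed
 * up without copying.
 */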
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_fifo_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;
		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}
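/*
 * tx_free bookkeeping (a sketch of what the code above does):
 * fep->tx_free counts descriptors the driver may still hand to the
 * hardware. fs_enet_start_xmit() decrements it and stops the queue when
 * it reaches zero; the reclaim loop above increments it once per
 * completed descriptor, and "if (!fep->tx_free++)" sets do_wake exactly
 * on the 0 -> 1 transition, so the queue is woken once rather than on
 * every reclaimed buffer.
 */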
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/*
				 * NOTE: it is possible for FCCs in NAPI mode
				 * to submit a spurious interrupt while in poll
				 */
				if (napi_ok)
					__netif_rx_schedule(dev, &fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
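/*
 * Ordering note (a reading of the handler above, not authoritative):
 * RX interrupts are masked via napi_disable_rx() *before* the poll is
 * scheduled, and fs_enet_rx_napi() re-enables them only after
 * netif_rx_complete(). napi_schedule_prep() is checked so that a
 * spurious FCC interrupt arriving while a poll is already pending does
 * not schedule NAPI a second time.
 */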
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}

	/*
	 * If we failed, fill up the remainder.
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
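/*
 * Ring layout sketch (illustrative, with rx_ring == 4): only the last
 * descriptor carries BD_SC_WRAP, so the controller walks the table
 * circularly.
 *
 *	bd[0] EMPTY    bd[1] EMPTY    bd[2] EMPTY    bd[3] EMPTY|WRAP
 *	  ^                                                |
 *	  +------------------ wraps back ------------------+
 */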
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/*
	 * Note that while FEC does not have this bit,
	 * it marks it as available for software use;
	 * yay for hw reuse :)
	 */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}
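/*
 * Descriptor handoff order matters above: the buffer address and length
 * are written first, and BD_ENET_TX_READY is set last via CBDS_SC(), so
 * the controller never sees a descriptor marked ready with a half-filled
 * payload. A rough timeline of the handoff:
 *
 *	CBDW_BUFADDR()/CBDW_DATLEN()  -> descriptor filled, CPU-owned
 *	CBDS_SC(..., TX_READY | ...)  -> ownership passes to the controller
 *	(*fep->ops->tx_kickstart)()   -> controller told to scan the ring
 */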
static int fs_request_irq(struct net_device *dev, int irq, const char *name,
		irq_handler_t irqf)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->pre_request_irq)(dev, irq);
	return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	free_irq(irq, dev);
	(*fep->ops->post_free_irq)(dev, irq);
}

static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}
/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
			netif_tx_schedule_all(dev);
			netif_carrier_on(dev);
			netif_start_queue(dev);
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
		netif_carrier_off(dev);
		netif_stop_queue(dev);
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}
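/*
 * Why restart on every state change (interpretation, not authoritative):
 * rather than poking new speed/duplex settings into a live MAC,
 * fep->ops->restart() reinitializes the controller so the renegotiated
 * parameters take effect cleanly. The oldlink/oldspeed/oldduplex values
 * cached in fep make this handler cheap when phylib invokes it with no
 * actual change: new_state stays 0 and no restart happens.
 */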
static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;
	if (fep->fpi->bus_id)
		phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
				     PHY_INTERFACE_MODE_MII);
	else {
		printk(KERN_ERR "No phy bus ID specified in BSP code\n");
		return -EINVAL;
	}
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	fep->phydev = phydev;

	return 0;
}
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	if (fep->fpi->use_napi)
		napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FS_ENET IRQ!\n", dev->name);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	if (fep->fpi->use_napi)
		napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	fs_free_irq(dev, fep->interrupt);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.set_tx_csum = ethtool_op_set_tx_csum,	/* local! */
	.set_sg = ethtool_op_set_sg,
	.get_regs = fs_get_regs,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, mii, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);
#ifndef CONFIG_PPC_CPM_NEW_BINDING
static struct net_device *fs_init_instance(struct device *dev,
		struct fs_platform_info *fpi)
{
	struct net_device *ndev = NULL;
	struct fs_enet_private *fep = NULL;
	int privsize, i, r, err = 0, registered = 0;

	fpi->fs_no = fs_get_id(fpi);
	/* guard */
	if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
		return ERR_PTR(-EINVAL);

	privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
				   (fpi->rx_ring + fpi->tx_ring));

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		err = -ENOMEM;
		goto err;
	}

	fep = netdev_priv(ndev);

	fep->dev = dev;
	dev_set_drvdata(dev, ndev);
	fep->fpi = fpi;
	if (fpi->init_ioports)
		fpi->init_ioports((struct fs_platform_info *)fpi);

#ifdef CONFIG_FS_ENET_HAS_FEC
	if (fs_get_fec_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	if (fs_get_scc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
	if (fs_get_fcc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fcc_ops;
#endif

	if (fep->ops == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s No matching ops found (%d).\n",
		       ndev->name, fpi->fs_no);
		err = -EINVAL;
		goto err;
	}

	r = (*fep->ops->setup_data)(ndev);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s setup_data failed\n",
		       ndev->name);
		err = r;
		goto err;
	}

	/* point rx_skbuff, tx_skbuff */
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	/* init locks */
	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	/*
	 * Set the Ethernet address.
	 */
	for (i = 0; i < 6; i++)
		ndev->dev_addr[i] = fpi->macaddr[i];

	r = (*fep->ops->allocate_bd)(ndev);

	if (fep->ring_base == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s buffer descriptor alloc failed (%d).\n",
		       ndev->name, r);
		err = r;
		goto err;
	}

	/*
	 * Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	/* initialize ring size variables */
	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	/*
	 * The FEC Ethernet specific entries in the device structure.
	 */
	ndev->open = fs_enet_open;
	ndev->hard_start_xmit = fs_enet_start_xmit;
	ndev->tx_timeout = fs_timeout;
	ndev->watchdog_timeo = 2 * HZ;
	ndev->stop = fs_enet_close;
	ndev->get_stats = fs_enet_get_stats;
	ndev->set_multicast_list = fs_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
	ndev->poll_controller = fs_enet_netpoll;
#endif

	netif_napi_add(ndev, &fep->napi,
		       fs_enet_rx_napi, fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;
	ndev->do_ioctl = fs_ioctl;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s register_netdev failed.\n", ndev->name);
		goto err;
	}
	registered = 1;

	return ndev;

err:
	if (ndev != NULL) {
		if (registered)
			unregister_netdev(ndev);

		if (fep && fep->ops) {
			(*fep->ops->free_bd)(ndev);
			(*fep->ops->cleanup_data)(ndev);
		}

		free_netdev(ndev);
	}

	dev_set_drvdata(dev, NULL);

	return ERR_PTR(err);
}

static int fs_cleanup_instance(struct net_device *ndev)
{
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	struct device *dev;

	if (ndev == NULL)
		return -EINVAL;

	fep = netdev_priv(ndev);
	if (fep == NULL)
		return -EINVAL;

	fpi = fep->fpi;

	unregister_netdev(ndev);

	dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			  (void __force *)fep->ring_base, fep->ring_mem_addr);

	/* reset it */
	(*fep->ops->cleanup_data)(ndev);

	dev = fep->dev;
	if (dev != NULL) {
		dev_set_drvdata(dev, NULL);
		fep->dev = NULL;
	}

	free_netdev(ndev);

	return 0;
}
#endif
/**************************************************************************************/

/* handy pointer to the immap */
void __iomem *fs_enet_immap = NULL;

static int setup_immap(void)
{
#ifdef CONFIG_CPM1
	fs_enet_immap = ioremap(IMAP_ADDR, 0x4000);
	WARN_ON(!fs_enet_immap);
#elif defined(CONFIG_CPM2)
	fs_enet_immap = cpm2_immr;
#endif

	return 0;
}

static void cleanup_immap(void)
{
#if defined(CONFIG_CPM1)
	iounmap(fs_enet_immap);
#endif
}
/**************************************************************************************/

#ifdef CONFIG_PPC_CPM_NEW_BINDING
static int __devinit find_phy(struct device_node *np,
			      struct fs_platform_info *fpi)
{
	struct device_node *phynode, *mdionode;
	int ret = 0, len, bus_id;
	const u32 *data;

	data = of_get_property(np, "fixed-link", NULL);
	if (data) {
		snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
		return 0;
	}

	data = of_get_property(np, "phy-handle", &len);
	if (!data || len != 4)
		return -EINVAL;

	phynode = of_find_node_by_phandle(*data);
	if (!phynode)
		return -EINVAL;

	data = of_get_property(phynode, "reg", &len);
	if (!data || len != 4) {
		ret = -EINVAL;
		goto out_put_phy;
	}

	mdionode = of_get_parent(phynode);
	if (!mdionode) {
		ret = -EINVAL;
		goto out_put_phy;
	}

	bus_id = of_get_gpio(mdionode, 0);
	if (bus_id < 0) {
		struct resource res;

		ret = of_address_to_resource(mdionode, 0, &res);
		if (ret)
			goto out_put_mdio;
		bus_id = res.start;
	}

	snprintf(fpi->bus_id, 16, "%x:%02x", bus_id, *data);

out_put_mdio:
	of_node_put(mdionode);
out_put_phy:
	of_node_put(phynode);
	return ret;
}
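/*
 * Device-tree sketch of what find_phy() consumes (hypothetical nodes;
 * the property names are the ones read above): the MAC node's
 * "phy-handle" points at a PHY node whose "reg" is its MDIO address,
 * and the PHY's parent is the MDIO bus node whose resource start
 * becomes the bus part of the "bus:addr" string, e.g. "e0102120:07".
 *
 *	enet@e0102000 {
 *		phy-handle = <&phy7>;
 *	};
 *	mdio@e0102120 {
 *		phy7: ethernet-phy@7 { reg = <7>; };
 *	};
 */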
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static int __devinit fs_enet_probe(struct of_device *ofdev,
				   const struct of_device_id *match)
{
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	const u8 *mac_addr;
	int privsize, len, ret = -ENODEV;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 32;
	fpi->rx_copybreak = 240;
	fpi->use_napi = 1;
	fpi->napi_weight = 17;

	ret = find_phy(ofdev->node, fpi);
	if (ret)
		goto out_free_fpi;

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		   (fpi->rx_ring + fpi->tx_ring);

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_free_fpi;
	}

	dev_set_drvdata(&ofdev->dev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, 6);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->open = fs_enet_open;
	ndev->hard_start_xmit = fs_enet_start_xmit;
	ndev->tx_timeout = fs_timeout;
	ndev->watchdog_timeo = 2 * HZ;
	ndev->stop = fs_enet_close;
	ndev->get_stats = fs_enet_get_stats;
	ndev->set_multicast_list = fs_set_multicast_list;

	if (fpi->use_napi)
		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
			       fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;
	ndev->do_ioctl = fs_ioctl;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
	dev_set_drvdata(&ofdev->dev, NULL);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static int fs_enet_remove(struct of_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);

	free_netdev(ndev);
	return 0;
}

static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
	{}
};

static struct of_platform_driver fs_enet_driver = {
	.name = "fs_enet",
	.match_table = fs_enet_match,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};

static int __init fs_init(void)
{
	int r = setup_immap();
	if (r != 0)
		return r;

	r = of_register_platform_driver(&fs_enet_driver);
	if (r != 0)
		goto out;

	return 0;

out:
	cleanup_immap();
	return r;
}

static void __exit fs_cleanup(void)
{
	of_unregister_platform_driver(&fs_enet_driver);
	cleanup_immap();
}
#else
static int __devinit fs_enet_probe(struct device *dev)
{
	struct net_device *ndev;

	/* no fixup - no device */
	if (dev->platform_data == NULL) {
		printk(KERN_INFO "fs_enet: "
		       "probe called with no platform data; "
		       "remove unused devices\n");
		return -ENODEV;
	}

	ndev = fs_init_instance(dev, dev->platform_data);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);
	return 0;
}

static int fs_enet_remove(struct device *dev)
{
	return fs_cleanup_instance(dev_get_drvdata(dev));
}

static struct device_driver fs_enet_fec_driver = {
	.name = "fsl-cpm-fec",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
	/* .suspend = fs_enet_suspend,	TODO */
	/* .resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
	.name = "fsl-cpm-scc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
	/* .suspend = fs_enet_suspend,	TODO */
	/* .resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
	.name = "fsl-cpm-fcc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
	/* .suspend = fs_enet_suspend,	TODO */
	/* .resume = fs_enet_resume,	TODO */
#endif
};

static int __init fs_init(void)
{
	int r;

	printk(KERN_INFO "%s", version);

	r = setup_immap();
	if (r != 0)
		return r;

#ifdef CONFIG_FS_ENET_HAS_FCC
	/* let's insert mii stuff */
	r = fs_enet_mdio_bb_init();
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": BB PHY init failed.\n");
		return r;
	}
	r = driver_register(&fs_enet_fcc_driver);
	if (r != 0)
		goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_FEC
	r = fs_enet_mdio_fec_init();
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": FEC PHY init failed.\n");
		return r;
	}

	r = driver_register(&fs_enet_fec_driver);
	if (r != 0)
		goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	r = driver_register(&fs_enet_scc_driver);
	if (r != 0)
		goto err;
#endif

	return 0;
err:
	cleanup_immap();
	return r;
}

static void __exit fs_cleanup(void)
{
	driver_unregister(&fs_enet_fec_driver);
	driver_unregister(&fs_enet_fcc_driver);
	driver_unregister(&fs_enet_scc_driver);
	cleanup_immap();
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	/* fs_enet_interrupt() takes (irq, dev_id); the stale third
	 * argument from the old pt_regs-style prototype is dropped. */
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);