/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"
/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
static void fs_set_multicast_list(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    (*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
    int off = ((unsigned long)skb->data) & (align - 1);

    if (off)
        skb_reserve(skb, align - off);
}
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
    struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
    struct net_device *dev = fep->ndev;
    const struct fs_platform_info *fpi = fep->fpi;
    cbd_t __iomem *bdp;
    struct sk_buff *skb, *skbn, *skbt;
    int received = 0;
    u16 pkt_len, sc;
    int curidx;

    /*
     * First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    /* clear RX status bits for napi */
    (*fep->ops->napi_clear_rx_event)(dev);

    while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
        curidx = bdp - fep->rx_bd_base;

        /*
         * Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((sc & BD_ENET_RX_LAST) == 0)
            dev_warn(fep->dev, "rcv is not +last\n");

        /*
         * Check for errors.
         */
        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
            fep->stats.rx_errors++;
            /* Frame too long or too short. */
            if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                fep->stats.rx_length_errors++;
            /* Frame alignment */
            if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                fep->stats.rx_frame_errors++;
            /* CRC Error */
            if (sc & BD_ENET_RX_CR)
                fep->stats.rx_crc_errors++;
            /* FIFO overrun */
            if (sc & BD_ENET_RX_OV)
                fep->stats.rx_crc_errors++;

            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE);

            skbn = skb;
        } else {
            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE);

            /*
             * Process the incoming frame.
             */
            fep->stats.rx_packets++;
            pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
            fep->stats.rx_bytes += pkt_len + 4;

            if (pkt_len <= fpi->rx_copybreak) {
                /* +2 to make IP header L1 cache aligned */
                skbn = netdev_alloc_skb(dev, pkt_len + 2);
                if (skbn != NULL) {
                    skb_reserve(skbn, 2); /* align IP header */
                    skb_copy_from_linear_data(skb,
                                              skbn->data, pkt_len);
                    /* swap */
                    skbt = skb;
                    skb = skbn;
                    skbn = skbt;
                }
            } else {
                skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

                if (skbn)
                    skb_align(skbn, ENET_RX_ALIGN);
            }

            if (skbn != NULL) {
                skb_put(skb, pkt_len); /* Make room */
                skb->protocol = eth_type_trans(skb, dev);
                received++;
                netif_receive_skb(skb);
            } else {
                dev_warn(fep->dev,
                         "Memory squeeze, dropping packet.\n");
                fep->stats.rx_dropped++;
                skbn = skb;
            }
        }

        fep->rx_skbuff[curidx] = skbn;
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                         DMA_FROM_DEVICE));
        CBDW_DATLEN(bdp, 0);
        CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

        /*
         * Update BD pointer to next entry.
         */
        if ((sc & BD_ENET_RX_WRAP) == 0)
            bdp++;
        else
            bdp = fep->rx_bd_base;

        (*fep->ops->rx_bd_done)(dev);

        if (received >= budget)
            break;
    }

    fep->cur_rx = bdp;

    if (received < budget) {
        /* done */
        napi_complete(napi);
        (*fep->ops->napi_enable_rx)(dev);
    }
    return received;
}
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    const struct fs_platform_info *fpi = fep->fpi;
    cbd_t __iomem *bdp;
    struct sk_buff *skb, *skbn, *skbt;
    int received = 0;
    u16 pkt_len, sc;
    int curidx;

    /*
     * First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
        curidx = bdp - fep->rx_bd_base;

        /*
         * Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((sc & BD_ENET_RX_LAST) == 0)
            dev_warn(fep->dev, "rcv is not +last\n");

        /*
         * Check for errors.
         */
        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
            fep->stats.rx_errors++;
            /* Frame too long or too short. */
            if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                fep->stats.rx_length_errors++;
            /* Frame alignment */
            if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                fep->stats.rx_frame_errors++;
            /* CRC Error */
            if (sc & BD_ENET_RX_CR)
                fep->stats.rx_crc_errors++;
            /* FIFO overrun */
            if (sc & BD_ENET_RX_OV)
                fep->stats.rx_crc_errors++;

            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE);

            skbn = skb;
        } else {
            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE);

            /*
             * Process the incoming frame.
             */
            fep->stats.rx_packets++;
            pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
            fep->stats.rx_bytes += pkt_len + 4;

            if (pkt_len <= fpi->rx_copybreak) {
                /* +2 to make IP header L1 cache aligned */
                skbn = netdev_alloc_skb(dev, pkt_len + 2);
                if (skbn != NULL) {
                    skb_reserve(skbn, 2); /* align IP header */
                    skb_copy_from_linear_data(skb,
                                              skbn->data, pkt_len);
                    /* swap */
                    skbt = skb;
                    skb = skbn;
                    skbn = skbt;
                }
            } else {
                skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

                if (skbn)
                    skb_align(skbn, ENET_RX_ALIGN);
            }

            if (skbn != NULL) {
                skb_put(skb, pkt_len); /* Make room */
                skb->protocol = eth_type_trans(skb, dev);
                received++;
                netif_rx(skb);
            } else {
                dev_warn(fep->dev,
                         "Memory squeeze, dropping packet.\n");
                fep->stats.rx_dropped++;
                skbn = skb;
            }
        }

        fep->rx_skbuff[curidx] = skbn;
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                         DMA_FROM_DEVICE));
        CBDW_DATLEN(bdp, 0);
        CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

        /*
         * Update BD pointer to next entry.
         */
        if ((sc & BD_ENET_RX_WRAP) == 0)
            bdp++;
        else
            bdp = fep->rx_bd_base;

        (*fep->ops->rx_bd_done)(dev);
    }

    fep->cur_rx = bdp;

    return 0;
}
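
/*
 * Transmit completion: reclaim buffer descriptors the controller has
 * finished with, update the error counters, free the skbs and wake the
 * queue if the ring had been full.
 */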
static void fs_enet_tx(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    cbd_t __iomem *bdp;
    struct sk_buff *skb;
    int dirtyidx, do_wake, do_restart;
    u16 sc;

    spin_lock(&fep->tx_lock);
    bdp = fep->dirty_tx;

    do_wake = do_restart = 0;
    while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
        dirtyidx = bdp - fep->tx_bd_base;

        if (fep->tx_free == fep->tx_ring)
            break;

        skb = fep->tx_skbuff[dirtyidx];

        /*
         * Check for errors.
         */
        if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

            if (sc & BD_ENET_TX_HB) /* No heartbeat */
                fep->stats.tx_heartbeat_errors++;
            if (sc & BD_ENET_TX_LC) /* Late collision */
                fep->stats.tx_window_errors++;
            if (sc & BD_ENET_TX_RL) /* Retrans limit */
                fep->stats.tx_aborted_errors++;
            if (sc & BD_ENET_TX_UN) /* Underrun */
                fep->stats.tx_fifo_errors++;
            if (sc & BD_ENET_TX_CSL) /* Carrier lost */
                fep->stats.tx_carrier_errors++;

            if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                fep->stats.tx_errors++;
                do_restart = 1;
            }
        } else
            fep->stats.tx_packets++;

        if (sc & BD_ENET_TX_READY) {
            dev_warn(fep->dev,
                     "HEY! Enet xmit interrupt and TX_READY.\n");
        }

        /*
         * Deferred means some collisions occurred during transmit,
         * but we eventually sent the packet OK.
         */
        if (sc & BD_ENET_TX_DEF)
            fep->stats.collisions++;

        /* unmap */
        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                         skb->len, DMA_TO_DEVICE);

        /*
         * Free the sk buffer associated with this last transmit.
         */
        dev_kfree_skb_irq(skb);
        fep->tx_skbuff[dirtyidx] = NULL;

        /*
         * Update pointer to next buffer descriptor to be transmitted.
         */
        if ((sc & BD_ENET_TX_WRAP) == 0)
            bdp++;
        else
            bdp = fep->tx_bd_base;

        /*
         * Since we have freed up a buffer, the ring is no longer
         * full.
         */
        if (!fep->tx_free++)
            do_wake = 1;
    }

    fep->dirty_tx = bdp;

    if (do_restart)
        (*fep->ops->tx_restart)(dev);

    spin_unlock(&fep->tx_lock);

    if (do_wake)
        netif_wake_queue(dev);
}
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct fs_enet_private *fep;
    const struct fs_platform_info *fpi;
    u32 int_events;
    u32 int_clr_events;
    int nr, napi_ok;
    int handled;

    fep = netdev_priv(dev);
    fpi = fep->fpi;

    nr = 0;
    while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
        nr++;

        int_clr_events = int_events;
        if (fpi->use_napi)
            int_clr_events &= ~fep->ev_napi_rx;

        (*fep->ops->clear_int_events)(dev, int_clr_events);

        if (int_events & fep->ev_err)
            (*fep->ops->ev_error)(dev, int_events);

        if (int_events & fep->ev_rx) {
            if (!fpi->use_napi)
                fs_enet_rx_non_napi(dev);
            else {
                napi_ok = napi_schedule_prep(&fep->napi);

                (*fep->ops->napi_disable_rx)(dev);
                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                /* NOTE: it is possible for FCCs in NAPI mode */
                /* to submit a spurious interrupt while in poll */
                if (napi_ok)
                    __napi_schedule(&fep->napi);
            }
        }

        if (int_events & fep->ev_tx)
            fs_enet_tx(dev);
    }

    handled = nr > 0;
    return IRQ_RETVAL(handled);
}
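
/*
 * Allocate receive skbs and (re)initialize both buffer descriptor rings
 * to their empty state; transmit descriptors start out unused.
 */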
void fs_init_bds(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    cbd_t __iomem *bdp;
    struct sk_buff *skb;
    int i;

    fs_cleanup_bds(dev);

    fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
    fep->tx_free = fep->tx_ring;
    fep->cur_rx = fep->rx_bd_base;

    /*
     * Initialize the receive buffer descriptors.
     */
    for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
        skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
        if (skb == NULL) {
            dev_warn(fep->dev,
                     "Memory squeeze, unable to allocate skb\n");
            break;
        }
        skb_align(skb, ENET_RX_ALIGN);
        fep->rx_skbuff[i] = skb;
        CBDW_BUFADDR(bdp,
                     dma_map_single(fep->dev, skb->data,
                                    L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                    DMA_FROM_DEVICE));
        CBDW_DATLEN(bdp, 0); /* zero */
        CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
    }
    /*
     * if we failed, fillup remainder
     */
    for (; i < fep->rx_ring; i++, bdp++) {
        fep->rx_skbuff[i] = NULL;
        CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
    }

    /*
     * ...and the same for transmit.
     */
    for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
        fep->tx_skbuff[i] = NULL;
        CBDW_BUFADDR(bdp, 0);
        CBDW_DATLEN(bdp, 0);
        CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
    }
}
void fs_cleanup_bds(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    struct sk_buff *skb;
    cbd_t __iomem *bdp;
    int i;

    /*
     * Reset SKB transmit buffers.
     */
    for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
        if ((skb = fep->tx_skbuff[i]) == NULL)
            continue;

        /* unmap */
        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                         skb->len, DMA_TO_DEVICE);

        fep->tx_skbuff[i] = NULL;
        dev_kfree_skb(skb);
    }

    /*
     * Reset SKB receive buffers
     */
    for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
        if ((skb = fep->rx_skbuff[i]) == NULL)
            continue;

        /* unmap */
        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                         DMA_FROM_DEVICE);

        fep->rx_skbuff[i] = NULL;
        dev_kfree_skb(skb);
    }
}

/**********************************************************************************/
#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * MPC5121 FEC requires 4-byte alignment for TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
                                               struct sk_buff *skb)
{
    struct sk_buff *new_skb;
    struct fs_enet_private *fep = netdev_priv(dev);

    /* Alloc new skb */
    new_skb = netdev_alloc_skb(dev, skb->len + 4);
    if (!new_skb) {
        if (net_ratelimit()) {
            dev_warn(fep->dev,
                     "Memory squeeze, dropping tx packet.\n");
        }
        return NULL;
    }

    /* Make sure new skb is properly aligned */
    skb_align(new_skb, 4);

    /* Copy data to new skb ... */
    skb_copy_from_linear_data(skb, new_skb->data, skb->len);
    skb_put(new_skb, skb->len);

    /* ... and free an old one */
    dev_kfree_skb_any(skb);

    return new_skb;
}
#endif
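
/*
 * Queue one frame for transmission: fill the next free Tx buffer
 * descriptor, map the data for DMA and kick the controller.
 */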
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    cbd_t __iomem *bdp;
    int curidx;
    u16 sc;
    unsigned long flags;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
    if (((unsigned long)skb->data) & 0x3) {
        skb = tx_skb_align_workaround(dev, skb);
        if (!skb) {
            /*
             * We have lost packet due to memory allocation error
             * in tx_skb_align_workaround(). Hopefully original
             * skb is still valid, so try transmit it later.
             */
            return NETDEV_TX_BUSY;
        }
    }
#endif
    spin_lock_irqsave(&fep->tx_lock, flags);

    /*
     * Fill in a Tx ring entry
     */
    bdp = fep->cur_tx;

    if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&fep->tx_lock, flags);

        /*
         * Ooops. All transmit buffers are full. Bail out.
         * This should not happen, since the tx queue should be stopped.
         */
        dev_warn(fep->dev, "tx queue full!.\n");
        return NETDEV_TX_BUSY;
    }

    curidx = bdp - fep->tx_bd_base;
    /*
     * Clear all of the status flags.
     */
    CBDC_SC(bdp, BD_ENET_TX_STATS);

    /*
     * Save skb pointer.
     */
    fep->tx_skbuff[curidx] = skb;

    fep->stats.tx_bytes += skb->len;

    /*
     * Push the data cache so the CPM does not get stale memory data.
     */
    CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                                     skb->data, skb->len, DMA_TO_DEVICE));
    CBDW_DATLEN(bdp, skb->len);

    /*
     * If this was the last BD in the ring, start at the beginning again.
     */
    if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
        fep->cur_tx++;
    else
        fep->cur_tx = fep->tx_bd_base;

    if (!--fep->tx_free)
        netif_stop_queue(dev);

    /* Trigger transmission start */
    sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
         BD_ENET_TX_LAST | BD_ENET_TX_TC;

    /* note that while FEC does not have this bit
     * it marks it as available for software use
     * yay for hw reuse :) */
    if (skb->len <= 60)
        sc |= BD_ENET_TX_PAD;
    CBDS_SC(bdp, sc);

    skb_tx_timestamp(skb);

    (*fep->ops->tx_kickstart)(dev);

    spin_unlock_irqrestore(&fep->tx_lock, flags);

    return NETDEV_TX_OK;
}
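
/*
 * Watchdog timeout handler: restart the MAC and PHY, then wake the Tx
 * queue if descriptors are available again.
 */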
static void fs_timeout(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;
    int wake = 0;

    fep->stats.tx_errors++;

    spin_lock_irqsave(&fep->lock, flags);

    if (dev->flags & IFF_UP) {
        phy_stop(fep->phydev);
        (*fep->ops->stop)(dev);
        (*fep->ops->restart)(dev);
        phy_start(fep->phydev);
    }

    phy_start(fep->phydev);
    wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
    spin_unlock_irqrestore(&fep->lock, flags);

    if (wake)
        netif_wake_queue(dev);
}
/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    struct phy_device *phydev = fep->phydev;
    int new_state = 0;

    if (phydev->link) {
        /* adjust to duplex mode */
        if (phydev->duplex != fep->oldduplex) {
            new_state = 1;
            fep->oldduplex = phydev->duplex;
        }

        if (phydev->speed != fep->oldspeed) {
            new_state = 1;
            fep->oldspeed = phydev->speed;
        }

        if (!fep->oldlink) {
            new_state = 1;
            fep->oldlink = 1;
        }

        if (new_state)
            fep->ops->restart(dev);
    } else if (fep->oldlink) {
        new_state = 1;
        fep->oldlink = 0;
        fep->oldspeed = 0;
        fep->oldduplex = -1;
    }

    if (new_state && netif_msg_link(fep))
        phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;

    spin_lock_irqsave(&fep->lock, flags);

    if (fep->ops->adjust_link)
        fep->ops->adjust_link(dev);
    else
        generic_adjust_link(dev);

    spin_unlock_irqrestore(&fep->lock, flags);
}
static int fs_init_phy(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    struct phy_device *phydev;
    phy_interface_t iface;

    fep->oldlink = 0;
    fep->oldspeed = 0;
    fep->oldduplex = -1;

    iface = fep->fpi->use_rmii ?
        PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;

    phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
                            iface);
    if (!phydev) {
        phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
                                           iface);
    }
    if (!phydev) {
        dev_err(&dev->dev, "Could not attach to PHY\n");
        return -ENODEV;
    }

    fep->phydev = phydev;

    return 0;
}
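
/*
 * Bring the interface up: set up the BD rings, enable NAPI, install the
 * interrupt handler and connect/start the PHY.
 */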
static int fs_enet_open(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    int r;
    int err;

    /* to initialize the fep->cur_rx,... */
    /* not doing this, will cause a crash in fs_enet_rx_napi */
    fs_init_bds(fep->ndev);

    if (fep->fpi->use_napi)
        napi_enable(&fep->napi);

    /* Install our interrupt handler. */
    r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
                    "fs_enet-mac", dev);
    if (r != 0) {
        dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
        if (fep->fpi->use_napi)
            napi_disable(&fep->napi);
        return -EINVAL;
    }

    err = fs_init_phy(dev);
    if (err) {
        free_irq(fep->interrupt, dev);
        if (fep->fpi->use_napi)
            napi_disable(&fep->napi);
        return err;
    }
    phy_start(fep->phydev);

    netif_start_queue(dev);

    return 0;
}
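
/*
 * Bring the interface down: stop the queue, PHY and MAC, then release
 * the PHY and the interrupt line.
 */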
static int fs_enet_close(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;

    netif_stop_queue(dev);
    netif_carrier_off(dev);
    if (fep->fpi->use_napi)
        napi_disable(&fep->napi);
    phy_stop(fep->phydev);

    spin_lock_irqsave(&fep->lock, flags);
    spin_lock(&fep->tx_lock);
    (*fep->ops->stop)(dev);
    spin_unlock(&fep->tx_lock);
    spin_unlock_irqrestore(&fep->lock, flags);

    /* release any irqs */
    phy_disconnect(fep->phydev);
    fep->phydev = NULL;
    free_irq(fep->interrupt, dev);

    return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    return &fep->stats;
}
/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
{
    strcpy(info->driver, DRV_MODULE_NAME);
    strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;
    int r, len;

    len = regs->len;

    spin_lock_irqsave(&fep->lock, flags);
    r = (*fep->ops->get_regs)(dev, p, &len);
    spin_unlock_irqrestore(&fep->lock, flags);

    if (r == 0)
        regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    if (!fep->phydev)
        return -ENODEV;

    return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    if (!fep->phydev)
        return -ENODEV;

    return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
    return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
    .get_drvinfo = fs_get_drvinfo,
    .get_regs_len = fs_get_regs_len,
    .get_settings = fs_get_settings,
    .set_settings = fs_set_settings,
    .nway_reset = fs_nway_reset,
    .get_link = ethtool_op_get_link,
    .get_msglevel = fs_get_msglevel,
    .set_msglevel = fs_set_msglevel,
    .get_regs = fs_get_regs,
    .get_ts_info = ethtool_op_get_ts_info,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    if (!netif_running(dev))
        return -EINVAL;

    return phy_mii_ioctl(fep->phydev, rq, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
    .ndo_open = fs_enet_open,
    .ndo_stop = fs_enet_close,
    .ndo_get_stats = fs_enet_get_stats,
    .ndo_start_xmit = fs_enet_start_xmit,
    .ndo_tx_timeout = fs_timeout,
    .ndo_set_rx_mode = fs_set_multicast_list,
    .ndo_do_ioctl = fs_ioctl,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = fs_enet_netpoll,
#endif
};
static struct of_device_id fs_enet_match[];
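
/*
 * Probe: parse the device tree node, allocate the net_device and ring
 * buffers, and register the interface.
 */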
static int fs_enet_probe(struct platform_device *ofdev)
{
    const struct of_device_id *match;
    struct net_device *ndev;
    struct fs_enet_private *fep;
    struct fs_platform_info *fpi;
    const u32 *data;
    const u8 *mac_addr;
    const char *phy_connection_type;
    int privsize, len, ret = -ENODEV;

    match = of_match_device(fs_enet_match, &ofdev->dev);
    if (!match)
        return -EINVAL;

    fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
    if (!fpi)
        return -ENOMEM;

    if (!IS_FEC(match)) {
        data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
        if (!data || len != 4)
            goto out_free_fpi;
        fpi->cp_command = *data;
    }

    fpi->rx_ring = 32;
    fpi->tx_ring = 32;
    fpi->rx_copybreak = 240;
    fpi->use_napi = 1;
    fpi->napi_weight = 17;
    fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
    if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
                                              NULL)))
        goto out_free_fpi;

    if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
        phy_connection_type = of_get_property(ofdev->dev.of_node,
                                              "phy-connection-type", NULL);
        if (phy_connection_type && !strcmp("rmii", phy_connection_type))
            fpi->use_rmii = 1;
    }

    privsize = sizeof(*fep) +
               sizeof(struct sk_buff **) *
               (fpi->rx_ring + fpi->tx_ring);

    ndev = alloc_etherdev(privsize);
    if (!ndev) {
        ret = -ENOMEM;
        goto out_put;
    }

    SET_NETDEV_DEV(ndev, &ofdev->dev);
    dev_set_drvdata(&ofdev->dev, ndev);

    fep = netdev_priv(ndev);
    fep->dev = &ofdev->dev;
    fep->ndev = ndev;
    fep->fpi = fpi;
    fep->ops = match->data;

    ret = fep->ops->setup_data(ndev);
    if (ret)
        goto out_free_dev;

    fep->rx_skbuff = (struct sk_buff **)&fep[1];
    fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

    spin_lock_init(&fep->lock);
    spin_lock_init(&fep->tx_lock);

    mac_addr = of_get_mac_address(ofdev->dev.of_node);
    if (mac_addr)
        memcpy(ndev->dev_addr, mac_addr, 6);

    ret = fep->ops->allocate_bd(ndev);
    if (ret)
        goto out_cleanup_data;

    fep->rx_bd_base = fep->ring_base;
    fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

    fep->tx_ring = fpi->tx_ring;
    fep->rx_ring = fpi->rx_ring;

    ndev->netdev_ops = &fs_enet_netdev_ops;
    ndev->watchdog_timeo = 2 * HZ;
    if (fpi->use_napi)
        netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
                       fpi->napi_weight);

    ndev->ethtool_ops = &fs_ethtool_ops;

    init_timer(&fep->phy_timer_list);

    netif_carrier_off(ndev);

    ret = register_netdev(ndev);
    if (ret)
        goto out_free_bd;

    pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

    return 0;

out_free_bd:
    fep->ops->free_bd(ndev);
out_cleanup_data:
    fep->ops->cleanup_data(ndev);
out_free_dev:
    free_netdev(ndev);
    dev_set_drvdata(&ofdev->dev, NULL);
out_put:
    of_node_put(fpi->phy_node);
out_free_fpi:
    kfree(fpi);
    return ret;
}
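
/*
 * Remove: unregister the interface and release the resources allocated
 * at probe time.
 */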
static int fs_enet_remove(struct platform_device *ofdev)
{
    struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
    struct fs_enet_private *fep = netdev_priv(ndev);

    unregister_netdev(ndev);

    fep->ops->free_bd(ndev);
    fep->ops->cleanup_data(ndev);
    dev_set_drvdata(fep->dev, NULL);
    of_node_put(fep->fpi->phy_node);
    free_netdev(ndev);
    return 0;
}
static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
    {
        .compatible = "fsl,cpm1-scc-enet",
        .data = (void *)&fs_scc_ops,
    },
    {
        .compatible = "fsl,cpm2-scc-enet",
        .data = (void *)&fs_scc_ops,
    },
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
    {
        .compatible = "fsl,cpm2-fcc-enet",
        .data = (void *)&fs_fcc_ops,
    },
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
    {
        .compatible = "fsl,mpc5121-fec",
        .data = (void *)&fs_fec_ops,
    },
    {
        .compatible = "fsl,mpc5125-fec",
        .data = (void *)&fs_fec_ops,
    },
#else
    {
        .compatible = "fsl,pq1-fec-enet",
        .data = (void *)&fs_fec_ops,
    },
#endif
#endif
    {}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
    .driver = {
        .owner = THIS_MODULE,
        .name = "fs_enet",
        .of_match_table = fs_enet_match,
    },
    .probe = fs_enet_probe,
    .remove = fs_enet_remove,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
    disable_irq(dev->irq);
    fs_enet_interrupt(dev->irq, dev);
    enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);