fs_enet-main.c
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"
/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
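
/* Multicast filtering is MAC-specific; hand it off to the per-controller ops. */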
static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}
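
/* Align skb->data to the requested boundary by reserving extra headroom. */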
static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				dev_warn(fep->dev,
					 "Memory squeeze, dropping packet.\n");
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				dev_warn(fep->dev,
					 "Memory squeeze, dropping packet.\n");
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
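
/*
 * Reclaim completed TX buffer descriptors: record error statistics,
 * unmap and free the transmitted skbs, and wake the queue once a
 * descriptor becomes available again.
 */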
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__napi_schedule(&fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
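
/*
 * Allocate receive skbs and (re)initialise the RX and TX buffer
 * descriptor rings, marking the last descriptor in each ring with
 * the wrap bit.
 */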
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skb == NULL) {
			dev_warn(fep->dev,
				 "Memory squeeze, unable to allocate skb\n");
			break;
		}
		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}

	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
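
/* Unmap DMA buffers and free every skb still attached to the TX and RX rings. */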
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}
/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * MPC5121 FEC requires 4-byte alignment for TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
		struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct fs_enet_private *fep = netdev_priv(dev);

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb) {
		if (net_ratelimit()) {
			dev_warn(fep->dev,
				 "Memory squeeze, dropping tx packet.\n");
		}
		return NULL;
	}

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif
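
/*
 * Queue one skb for transmission: claim the next free TX buffer
 * descriptor, map the data for DMA, set the ready/interrupt bits and
 * kick the controller.
 */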
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
	if (((unsigned long)skb->data) & 0x3) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost packet due to memory allocation error
			 * in tx_skb_align_workaround(). Hopefully original
			 * skb is still valid, so try transmit it later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif
	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!.\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;

	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}
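
/*
 * Transmit watchdog: the queue has stalled, so restart the controller
 * and the PHY, then wake the queue if a free descriptor is available.
 */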
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	phy_start(fep->phydev);
	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}
/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}
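
/*
 * PHY link-change callback: use the MAC-specific handler when one is
 * provided, otherwise fall back to the generic handler above.
 */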
static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}
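
/*
 * Connect to the PHY described in the device tree (or to a fixed link)
 * and register the link-change callback.
 */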
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;
	phy_interface_t iface;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	iface = fep->fpi->use_rmii ?
		PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				iface);
	if (!phydev) {
		phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
						   iface);
	}
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	fep->phydev = phydev;

	return 0;
}
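
/*
 * ndo_open: set up the descriptor rings, enable NAPI, request the
 * interrupt, attach the PHY and start the transmit queue.
 */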
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* to initialize the fep->cur_rx,... */
	/* not doing this, will cause a crash in fs_enet_rx_napi */
	fs_init_bds(fep->ndev);

	if (fep->fpi->use_napi)
		napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}
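
/*
 * ndo_stop: quiesce the queue and NAPI, stop the MAC and PHY, then
 * release the PHY and the interrupt.
 */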
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	if (fep->fpi->use_napi)
		napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	free_irq(fep->interrupt, dev);

	return 0;
}
static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo	= fs_get_drvinfo,
	.get_regs_len	= fs_get_regs_len,
	.get_settings	= fs_get_settings,
	.set_settings	= fs_set_settings,
	.nway_reset	= fs_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= fs_get_msglevel,
	.set_msglevel	= fs_set_msglevel,
	.get_regs	= fs_get_regs,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, rq, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open		= fs_enet_open,
	.ndo_stop		= fs_enet_close,
	.ndo_get_stats		= fs_enet_get_stats,
	.ndo_start_xmit		= fs_enet_start_xmit,
	.ndo_tx_timeout		= fs_timeout,
	.ndo_set_rx_mode	= fs_set_multicast_list,
	.ndo_do_ioctl		= fs_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fs_enet_netpoll,
#endif
};

static struct of_device_id fs_enet_match[];
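
/*
 * Probe: parse the device-tree node, allocate the net_device and ring
 * state, hook up the MAC-specific ops and register the interface.
 */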
static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	const u8 *mac_addr;
	const char *phy_connection_type;
	int privsize, len, ret = -ENODEV;

	match = of_match_device(fs_enet_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 32;
	fpi->rx_copybreak = 240;
	fpi->use_napi = 1;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
						  NULL)))
		goto out_free_fpi;

	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
		phy_connection_type = of_get_property(ofdev->dev.of_node,
						"phy-connection-type", NULL);
		if (phy_connection_type && !strcmp("rmii", phy_connection_type))
			fpi->use_rmii = 1;
	}

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		   (fpi->rx_ring + fpi->tx_ring);

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	dev_set_drvdata(&ofdev->dev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->dev.of_node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, 6);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	if (fpi->use_napi)
		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
			       fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
	dev_set_drvdata(&ofdev->dev, NULL);
out_put:
	of_node_put(fpi->phy_node);
out_free_fpi:
	kfree(fpi);
	return ret;
}
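
/*
 * Remove: unregister the interface, release the rings and platform
 * data, and drop the device-tree references taken at probe time.
 */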
static int fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	free_netdev(ndev);
	return 0;
}
static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
	{
		.compatible = "fsl,mpc5125-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
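/* Polled operation: run the interrupt handler with the device IRQ masked. */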
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);