fs_enet-main.c

/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}
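/* Advance skb->data to the next 'align'-byte boundary (align must be a power of two). */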
static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;
		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				dev_warn(fep->dev,
					 "Memory squeeze, dropping packet.\n");
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;
		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				dev_warn(fep->dev,
					 "Memory squeeze, dropping packet.\n");
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;
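		/* If every descriptor is already free there is nothing left to reclaim. */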
		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
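	/* Keep servicing events until the controller reports none pending. */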
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__napi_schedule(&fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			dev_warn(fep->dev,
				 "Memory squeeze, unable to allocate skb\n");
			break;
		}
		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fill up the remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct fs_enet_private *fep = netdev_priv(dev);

	/* Alloc new skb */
	new_skb = dev_alloc_skb(skb->len + 4);
	if (!new_skb) {
		if (net_ratelimit()) {
			dev_warn(fep->dev,
				 "Memory squeeze, dropping tx packet.\n");
		}
		return NULL;
	}

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
	if (((unsigned long)skb->data) & 0x3) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost the packet due to a memory allocation
			 * error in tx_skb_align_workaround(). Hopefully the
			 * original skb is still valid, so transmission will
			 * be retried later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif
	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Oops. All transmit buffers are full. Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;
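	/* If that consumed the last free descriptor, stop the queue until TX completion frees one. */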
	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}
/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	if (!phydev) {
		phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
						   PHY_INTERFACE_MODE_MII);
	}
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	fep->phydev = phydev;

	return 0;
}
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* to initialize the fep->cur_rx,... */
	/* not doing this, will cause a crash in fs_enet_rx_napi */
	fs_init_bds(fep->ndev);

	if (fep->fpi->use_napi)
		napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	if (fep->fpi->use_napi)
		napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	free_irq(fep->interrupt, dev);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}
/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, rq, cmd);
}
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open = fs_enet_open,
	.ndo_stop = fs_enet_close,
	.ndo_get_stats = fs_enet_get_stats,
	.ndo_start_xmit = fs_enet_start_xmit,
	.ndo_tx_timeout = fs_timeout,
	.ndo_set_multicast_list = fs_set_multicast_list,
	.ndo_do_ioctl = fs_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = fs_enet_netpoll,
#endif
};
static struct of_device_id fs_enet_match[];

static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	const u8 *mac_addr;
	int privsize, len, ret = -ENODEV;

	match = of_match_device(fs_enet_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 32;
	fpi->rx_copybreak = 240;
	fpi->use_napi = 1;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
						  NULL)))
		goto out_free_fpi;

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		   (fpi->rx_ring + fpi->tx_ring);

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	dev_set_drvdata(&ofdev->dev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;
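	/* The rx/tx skb pointer arrays live in the extra space allocated past the private struct (see privsize above). */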
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->dev.of_node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, 6);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	if (fpi->use_napi)
		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
			       fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
	dev_set_drvdata(&ofdev->dev, NULL);
out_put:
	of_node_put(fpi->phy_node);
out_free_fpi:
	kfree(fpi);
	return ret;
}
static int fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	free_netdev(ndev);
	return 0;
}
static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};
static int __init fs_init(void)
{
	return platform_driver_register(&fs_enet_driver);
}

static void __exit fs_cleanup(void)
{
	platform_driver_unregister(&fs_enet_driver);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
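/* Polling "interrupt": disable the IRQ line and run the handler directly, for netconsole and similar users. */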
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);