/* fec.c */
  1. /*
  2. * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
  3. * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
  4. *
  5. * Right now, I am very wasteful with the buffers. I allocate memory
  6. * pages and then divide them into 2K frame buffers. This way I know I
  7. * have buffers large enough to hold one frame within one buffer descriptor.
  8. * Once I get this working, I will use 64 or 128 byte CPM buffers, which
  9. * will be much more memory efficient and will easily handle lots of
  10. * small packets.
  11. *
  12. * Much better multiple PHY support by Magnus Damm.
  13. * Copyright (c) 2000 Ericsson Radio Systems AB.
  14. *
  15. * Support for FEC controller of ColdFire processors.
  16. * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
  17. *
  18. * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
  19. * Copyright (c) 2004-2006 Macq Electronique SA.
  20. */
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/string.h>
  24. #include <linux/ptrace.h>
  25. #include <linux/errno.h>
  26. #include <linux/ioport.h>
  27. #include <linux/slab.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/pci.h>
  30. #include <linux/init.h>
  31. #include <linux/delay.h>
  32. #include <linux/netdevice.h>
  33. #include <linux/etherdevice.h>
  34. #include <linux/skbuff.h>
  35. #include <linux/spinlock.h>
  36. #include <linux/workqueue.h>
  37. #include <linux/bitops.h>
  38. #include <linux/io.h>
  39. #include <linux/irq.h>
  40. #include <linux/clk.h>
  41. #include <linux/platform_device.h>
  42. #include <linux/phy.h>
  43. #include <asm/cacheflush.h>
  44. #ifndef CONFIG_ARCH_MXC
  45. #include <asm/coldfire.h>
  46. #include <asm/mcfsim.h>
  47. #endif
  48. #include "fec.h"
  49. #ifdef CONFIG_ARCH_MXC
  50. #include <mach/hardware.h>
  51. #define FEC_ALIGNMENT 0xf
  52. #else
  53. #define FEC_ALIGNMENT 0x3
  54. #endif
  55. /*
  56. * Define the fixed address of the FEC hardware.
  57. */
  58. #if defined(CONFIG_M5272)
  59. static unsigned char fec_mac_default[] = {
  60. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  61. };
  62. /*
  63. * Some hardware gets it MAC address out of local flash memory.
  64. * if this is non-zero then assume it is the address to get MAC from.
  65. */
  66. #if defined(CONFIG_NETtel)
  67. #define FEC_FLASHMAC 0xf0006006
  68. #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
  69. #define FEC_FLASHMAC 0xf0006000
  70. #elif defined(CONFIG_CANCam)
  71. #define FEC_FLASHMAC 0xf0020000
  72. #elif defined (CONFIG_M5272C3)
  73. #define FEC_FLASHMAC (0xffe04000 + 4)
  74. #elif defined(CONFIG_MOD5272)
  75. #define FEC_FLASHMAC 0xffc0406b
  76. #else
  77. #define FEC_FLASHMAC 0
  78. #endif
  79. #endif /* CONFIG_M5272 */
  80. /* The number of Tx and Rx buffers. These are allocated from the page
  81. * pool. The code may assume these are power of two, so it it best
  82. * to keep them that size.
  83. * We don't need to allocate pages for the transmitter. We just use
  84. * the skbuffer directly.
  85. */
  86. #define FEC_ENET_RX_PAGES 8
  87. #define FEC_ENET_RX_FRSIZE 2048
  88. #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
  89. #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
  90. #define FEC_ENET_TX_FRSIZE 2048
  91. #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
  92. #define TX_RING_SIZE 16 /* Must be power of two */
  93. #define TX_RING_MOD_MASK 15 /* for this to work */
  94. #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
  95. #error "FEC: descriptor ring size constants too large"
  96. #endif
  97. /* Interrupt events/masks. */
  98. #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
  99. #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
  100. #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
  101. #define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
  102. #define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
  103. #define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
  104. #define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
  105. #define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
  106. #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
  107. #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
  108. /* The FEC stores dest/src/type, data, and checksum for receive packets.
  109. */
  110. #define PKT_MAXBUF_SIZE 1518
  111. #define PKT_MINBUF_SIZE 64
  112. #define PKT_MAXBLR_SIZE 1520
  113. /*
  114. * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
  115. * size bits. Other FEC hardware does not, so we need to take that into
  116. * account when setting it.
  117. */
  118. #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
  119. defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
  120. #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
  121. #else
  122. #define OPT_FRAME_SIZE 0
  123. #endif
  124. /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
  125. * tx_bd_base always point to the base of the buffer descriptors. The
  126. * cur_rx and cur_tx point to the currently available buffer.
  127. * The dirty_tx tracks the current buffer that is being sent by the
  128. * controller. The cur_tx and dirty_tx are equal under both completely
  129. * empty and completely full conditions. The empty/ready indicator in
  130. * the buffer descriptor determines the actual condition.
  131. */
/* Per-interface driver state, reached via netdev_priv(). */
struct fec_enet_private {
	/* Hardware registers of the FEC device (ioremapped base) */
	void __iomem *hwp;

	struct net_device *netdev;	/* back-pointer to our net_device */

	struct clk *clk;		/* FEC peripheral clock; rate feeds phy_speed */

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];	/* aligned copies for skbs that
						 * violate FEC_ALIGNMENT */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];	/* skbs awaiting TX completion */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];	/* skbs backing the RX ring */
	ushort	skb_cur;	/* next free tx_skbuff slot (masked by TX_RING_MOD_MASK) */
	ushort	skb_dirty;	/* oldest in-flight tx_skbuff slot, freed in fec_enet_tx() */

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;		/* DMA address of the descriptor rings */
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;
	uint	tx_full;	/* set when cur_tx catches up with dirty_tx */
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;

	struct platform_device *pdev;

	int	opened;		/* nonzero between open() and close(); RX drops frames otherwise */

	/* Phylib and MDIO interface */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	int	mii_timeout;	/* set when an MDIO transfer timed out */
	uint	phy_speed;	/* value programmed into FEC_MII_SPEED */
	int	index;		/* controller index; offsets the default MAC address */
	int	link;		/* last link state reported by phylib */
	int	full_duplex;	/* duplex mode currently programmed into the MAC */
};
  166. static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
  167. static void fec_enet_tx(struct net_device *dev);
  168. static void fec_enet_rx(struct net_device *dev);
  169. static int fec_enet_close(struct net_device *dev);
  170. static void fec_restart(struct net_device *dev, int duplex);
  171. static void fec_stop(struct net_device *dev);
  172. /* FEC MII MMFR bits definition */
  173. #define FEC_MMFR_ST (1 << 30)
  174. #define FEC_MMFR_OP_READ (2 << 28)
  175. #define FEC_MMFR_OP_WRITE (1 << 28)
  176. #define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
  177. #define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
  178. #define FEC_MMFR_TA (2 << 16)
  179. #define FEC_MMFR_DATA(v) (v & 0xffff)
  180. #define FEC_MII_TIMEOUT 10000
  181. /* Transmitter timeout */
  182. #define TX_TIMEOUT (2 * HZ)
/* Queue one skb for transmission.
 *
 * Claims the next TX buffer descriptor under hw_lock, bounces the data
 * through an aligned buffer when the controller requires it, maps the
 * buffer for DMA, marks the descriptor ready and kicks the transmitter.
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when the link is down
 * or the ring is unexpectedly full.
 */
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops. All transmit buffers are full. Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		/* Bounce buffers are indexed by descriptor position */
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer so fec_enet_tx() can free it on completion */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 *
	 * NOTE(review): this maps FEC_ENET_TX_FRSIZE (2048) bytes
	 * regardless of skb->len; the unmap in fec_enet_tx() uses the
	 * same size, so map/unmap agree, but confirm the skb data area
	 * is always at least that large when no bounce buffer is used.
	 */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 * The status write must come after cbd_bufaddr is valid.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	/* Ring full when the producer catches the reclaim pointer */
	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}
  253. static void
  254. fec_timeout(struct net_device *dev)
  255. {
  256. struct fec_enet_private *fep = netdev_priv(dev);
  257. dev->stats.tx_errors++;
  258. fec_restart(dev, fep->full_duplex);
  259. netif_wake_queue(dev);
  260. }
/* Combined interrupt handler for the FEC.
 *
 * Reads the pending-event register and writes the same value back to
 * acknowledge it (presumably write-1-to-clear — the read value is
 * written straight back), then dispatches RX and TX completion work.
 * Loops until no events remain so that events raised while processing
 * are not lost. Returns IRQ_HANDLED if any RXF/TXF event was serviced.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		/* Read and acknowledge all pending events in one shot */
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}
	} while (int_events);

	return ret;
}
/* TX completion: reclaim finished descriptors.
 *
 * Called from the interrupt handler. Walks the ring from dirty_tx,
 * unmapping each completed buffer, accounting errors from the
 * descriptor status bits, freeing the associated skb and re-waking the
 * queue if the ring had been full.
 */
static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	/* A clear READY bit means the hardware is done with this BD */
	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		/* Reclaim pointer caught up with the producer and the
		 * ring is not full: nothing more to do. */
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		/* Size must match the map in fec_enet_start_xmit() */
		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				BD_ENET_TX_RL | BD_ENET_TX_UN |
				BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		/* Should be unreachable: loop condition excludes READY */
		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
  347. /* During a receive, the cur_rx points to the current incoming buffer.
  348. * When we update through the ring, if the next incoming buffer has
  349. * not been given to the system, we just set the empty indicator,
  350. * effectively tossing the packet.
  351. */
  352. static void
  353. fec_enet_rx(struct net_device *dev)
  354. {
  355. struct fec_enet_private *fep = netdev_priv(dev);
  356. struct bufdesc *bdp;
  357. unsigned short status;
  358. struct sk_buff *skb;
  359. ushort pkt_len;
  360. __u8 *data;
  361. #ifdef CONFIG_M532x
  362. flush_cache_all();
  363. #endif
  364. spin_lock(&fep->hw_lock);
  365. /* First, grab all of the stats for the incoming packet.
  366. * These get messed up if we get called due to a busy condition.
  367. */
  368. bdp = fep->cur_rx;
  369. while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
  370. /* Since we have allocated space to hold a complete frame,
  371. * the last indicator should be set.
  372. */
  373. if ((status & BD_ENET_RX_LAST) == 0)
  374. printk("FEC ENET: rcv is not +last\n");
  375. if (!fep->opened)
  376. goto rx_processing_done;
  377. /* Check for errors. */
  378. if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
  379. BD_ENET_RX_CR | BD_ENET_RX_OV)) {
  380. dev->stats.rx_errors++;
  381. if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
  382. /* Frame too long or too short. */
  383. dev->stats.rx_length_errors++;
  384. }
  385. if (status & BD_ENET_RX_NO) /* Frame alignment */
  386. dev->stats.rx_frame_errors++;
  387. if (status & BD_ENET_RX_CR) /* CRC Error */
  388. dev->stats.rx_crc_errors++;
  389. if (status & BD_ENET_RX_OV) /* FIFO overrun */
  390. dev->stats.rx_fifo_errors++;
  391. }
  392. /* Report late collisions as a frame error.
  393. * On this error, the BD is closed, but we don't know what we
  394. * have in the buffer. So, just drop this frame on the floor.
  395. */
  396. if (status & BD_ENET_RX_CL) {
  397. dev->stats.rx_errors++;
  398. dev->stats.rx_frame_errors++;
  399. goto rx_processing_done;
  400. }
  401. /* Process the incoming frame. */
  402. dev->stats.rx_packets++;
  403. pkt_len = bdp->cbd_datlen;
  404. dev->stats.rx_bytes += pkt_len;
  405. data = (__u8*)__va(bdp->cbd_bufaddr);
  406. dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
  407. DMA_FROM_DEVICE);
  408. /* This does 16 byte alignment, exactly what we need.
  409. * The packet length includes FCS, but we don't want to
  410. * include that when passing upstream as it messes up
  411. * bridging applications.
  412. */
  413. skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
  414. if (unlikely(!skb)) {
  415. printk("%s: Memory squeeze, dropping packet.\n",
  416. dev->name);
  417. dev->stats.rx_dropped++;
  418. } else {
  419. skb_reserve(skb, NET_IP_ALIGN);
  420. skb_put(skb, pkt_len - 4); /* Make room */
  421. skb_copy_to_linear_data(skb, data, pkt_len - 4);
  422. skb->protocol = eth_type_trans(skb, dev);
  423. netif_rx(skb);
  424. }
  425. bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
  426. DMA_FROM_DEVICE);
  427. rx_processing_done:
  428. /* Clear the status flags for this buffer */
  429. status &= ~BD_ENET_RX_STATS;
  430. /* Mark the buffer empty */
  431. status |= BD_ENET_RX_EMPTY;
  432. bdp->cbd_sc = status;
  433. /* Update BD pointer to next entry */
  434. if (status & BD_ENET_RX_WRAP)
  435. bdp = fep->rx_bd_base;
  436. else
  437. bdp++;
  438. /* Doing this here will keep the FEC running while we process
  439. * incoming frames. On a heavily loaded network, we should be
  440. * able to keep up at the expense of system resources.
  441. */
  442. writel(0, fep->hwp + FEC_R_DES_ACTIVE);
  443. }
  444. fep->cur_rx = bdp;
  445. spin_unlock(&fep->hw_lock);
  446. }
  447. /* ------------------------------------------------------------------------- */
  448. #ifdef CONFIG_M5272
  449. static void __inline__ fec_get_mac(struct net_device *dev)
  450. {
  451. struct fec_enet_private *fep = netdev_priv(dev);
  452. unsigned char *iap, tmpaddr[ETH_ALEN];
  453. if (FEC_FLASHMAC) {
  454. /*
  455. * Get MAC address from FLASH.
  456. * If it is all 1's or 0's, use the default.
  457. */
  458. iap = (unsigned char *)FEC_FLASHMAC;
  459. if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
  460. (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
  461. iap = fec_mac_default;
  462. if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
  463. (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
  464. iap = fec_mac_default;
  465. } else {
  466. *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
  467. *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
  468. iap = &tmpaddr[0];
  469. }
  470. memcpy(dev->dev_addr, iap, ETH_ALEN);
  471. /* Adjust MAC if using default MAC address */
  472. if (iap == fec_mac_default)
  473. dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
  474. }
  475. #endif
  476. /* ------------------------------------------------------------------------- */
  477. /*
  478. * Phy section
  479. */
  480. static void fec_enet_adjust_link(struct net_device *dev)
  481. {
  482. struct fec_enet_private *fep = netdev_priv(dev);
  483. struct phy_device *phy_dev = fep->phy_dev;
  484. unsigned long flags;
  485. int status_change = 0;
  486. spin_lock_irqsave(&fep->hw_lock, flags);
  487. /* Prevent a state halted on mii error */
  488. if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
  489. phy_dev->state = PHY_RESUMING;
  490. goto spin_unlock;
  491. }
  492. /* Duplex link change */
  493. if (phy_dev->link) {
  494. if (fep->full_duplex != phy_dev->duplex) {
  495. fec_restart(dev, phy_dev->duplex);
  496. status_change = 1;
  497. }
  498. }
  499. /* Link on or off change */
  500. if (phy_dev->link != fep->link) {
  501. fep->link = phy_dev->link;
  502. if (phy_dev->link)
  503. fec_restart(dev, phy_dev->duplex);
  504. else
  505. fec_stop(dev);
  506. status_change = 1;
  507. }
  508. spin_unlock:
  509. spin_unlock_irqrestore(&fep->hw_lock, flags);
  510. if (status_change)
  511. phy_print_status(phy_dev);
  512. }
  513. /*
  514. * NOTE: a MII transaction is during around 25 us, so polling it...
  515. */
  516. static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  517. {
  518. struct fec_enet_private *fep = bus->priv;
  519. int timeout = FEC_MII_TIMEOUT;
  520. fep->mii_timeout = 0;
  521. /* clear MII end of transfer bit*/
  522. writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
  523. /* start a read op */
  524. writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
  525. FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  526. FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
  527. /* wait for end of transfer */
  528. while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
  529. cpu_relax();
  530. if (timeout-- < 0) {
  531. fep->mii_timeout = 1;
  532. printk(KERN_ERR "FEC: MDIO read timeout\n");
  533. return -ETIMEDOUT;
  534. }
  535. }
  536. /* return value */
  537. return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
  538. }
  539. static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
  540. u16 value)
  541. {
  542. struct fec_enet_private *fep = bus->priv;
  543. int timeout = FEC_MII_TIMEOUT;
  544. fep->mii_timeout = 0;
  545. /* clear MII end of transfer bit*/
  546. writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
  547. /* start a read op */
  548. writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
  549. FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  550. FEC_MMFR_TA | FEC_MMFR_DATA(value),
  551. fep->hwp + FEC_MII_DATA);
  552. /* wait for end of transfer */
  553. while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
  554. cpu_relax();
  555. if (timeout-- < 0) {
  556. fep->mii_timeout = 1;
  557. printk(KERN_ERR "FEC: MDIO write timeout\n");
  558. return -ETIMEDOUT;
  559. }
  560. }
  561. return 0;
  562. }
  563. static int fec_enet_mdio_reset(struct mii_bus *bus)
  564. {
  565. return 0;
  566. }
  567. static int fec_enet_mii_probe(struct net_device *dev)
  568. {
  569. struct fec_enet_private *fep = netdev_priv(dev);
  570. struct phy_device *phy_dev = NULL;
  571. int phy_addr;
  572. /* find the first phy */
  573. for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
  574. if (fep->mii_bus->phy_map[phy_addr]) {
  575. phy_dev = fep->mii_bus->phy_map[phy_addr];
  576. break;
  577. }
  578. }
  579. if (!phy_dev) {
  580. printk(KERN_ERR "%s: no PHY found\n", dev->name);
  581. return -ENODEV;
  582. }
  583. /* attach the mac to the phy */
  584. phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
  585. &fec_enet_adjust_link, 0,
  586. PHY_INTERFACE_MODE_MII);
  587. if (IS_ERR(phy_dev)) {
  588. printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
  589. return PTR_ERR(phy_dev);
  590. }
  591. /* mask with MAC supported features */
  592. phy_dev->supported &= PHY_BASIC_FEATURES;
  593. phy_dev->advertising = phy_dev->supported;
  594. fep->phy_dev = phy_dev;
  595. fep->link = 0;
  596. fep->full_duplex = 0;
  597. return 0;
  598. }
/* Create and register the MDIO bus and attach a PHY.
 *
 * Programs the MII clock divider, allocates an mii_bus wired to the
 * fec_enet_mdio_* ops with all PHYs polled, registers it and probes
 * for a PHY. Returns 0 on success, -ENOMEM on allocation failure, or
 * -ENXIO when registration/probing fails.
 */
static int fec_enet_mii_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(dev);
	int err = -ENXIO, i;

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	fep->mii_bus->reset = fec_enet_mdio_reset;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	/* No PHY interrupts wired up: poll every address */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	/* NOTE(review): 'dev' is the net_device, not the platform
	 * device — this stores the MII bus as drvdata of the
	 * net_device's embedded struct device. Confirm this is
	 * intentional and does not clobber data another layer expects
	 * there (the platform device's drvdata is the net_device, see
	 * the platform_get_drvdata() call above). */
	platform_set_drvdata(dev, fep->mii_bus);

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	if (fec_enet_mii_probe(dev) != 0)
		goto err_out_unregister_bus;

	return 0;

/* Unwind in reverse order of construction */
err_out_unregister_bus:
	mdiobus_unregister(fep->mii_bus);
err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}
  644. static void fec_enet_mii_remove(struct fec_enet_private *fep)
  645. {
  646. if (fep->phy_dev)
  647. phy_disconnect(fep->phy_dev);
  648. mdiobus_unregister(fep->mii_bus);
  649. kfree(fep->mii_bus->irq);
  650. mdiobus_free(fep->mii_bus);
  651. }
  652. static int fec_enet_get_settings(struct net_device *dev,
  653. struct ethtool_cmd *cmd)
  654. {
  655. struct fec_enet_private *fep = netdev_priv(dev);
  656. struct phy_device *phydev = fep->phy_dev;
  657. if (!phydev)
  658. return -ENODEV;
  659. return phy_ethtool_gset(phydev, cmd);
  660. }
  661. static int fec_enet_set_settings(struct net_device *dev,
  662. struct ethtool_cmd *cmd)
  663. {
  664. struct fec_enet_private *fep = netdev_priv(dev);
  665. struct phy_device *phydev = fep->phy_dev;
  666. if (!phydev)
  667. return -ENODEV;
  668. return phy_ethtool_sset(phydev, cmd);
  669. }
  670. static void fec_enet_get_drvinfo(struct net_device *dev,
  671. struct ethtool_drvinfo *info)
  672. {
  673. struct fec_enet_private *fep = netdev_priv(dev);
  674. strcpy(info->driver, fep->pdev->dev.driver->name);
  675. strcpy(info->version, "Revision: 1.0");
  676. strcpy(info->bus_info, dev_name(&dev->dev));
  677. }
/* ethtool operations: link settings are delegated to phylib via the
 * attached PHY; drvinfo is served locally and link state comes from
 * the generic carrier helper. */
static struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings	= fec_enet_get_settings,
	.set_settings	= fec_enet_set_settings,
	.get_drvinfo	= fec_enet_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
  684. static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  685. {
  686. struct fec_enet_private *fep = netdev_priv(dev);
  687. struct phy_device *phydev = fep->phy_dev;
  688. if (!netif_running(dev))
  689. return -EINVAL;
  690. if (!phydev)
  691. return -ENODEV;
  692. return phy_mii_ioctl(phydev, if_mii(rq), cmd);
  693. }
  694. static void fec_enet_free_buffers(struct net_device *dev)
  695. {
  696. struct fec_enet_private *fep = netdev_priv(dev);
  697. int i;
  698. struct sk_buff *skb;
  699. struct bufdesc *bdp;
  700. bdp = fep->rx_bd_base;
  701. for (i = 0; i < RX_RING_SIZE; i++) {
  702. skb = fep->rx_skbuff[i];
  703. if (bdp->cbd_bufaddr)
  704. dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
  705. FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  706. if (skb)
  707. dev_kfree_skb(skb);
  708. bdp++;
  709. }
  710. bdp = fep->tx_bd_base;
  711. for (i = 0; i < TX_RING_SIZE; i++)
  712. kfree(fep->tx_bounce[i]);
  713. }
  714. static int fec_enet_alloc_buffers(struct net_device *dev)
  715. {
  716. struct fec_enet_private *fep = netdev_priv(dev);
  717. int i;
  718. struct sk_buff *skb;
  719. struct bufdesc *bdp;
  720. bdp = fep->rx_bd_base;
  721. for (i = 0; i < RX_RING_SIZE; i++) {
  722. skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
  723. if (!skb) {
  724. fec_enet_free_buffers(dev);
  725. return -ENOMEM;
  726. }
  727. fep->rx_skbuff[i] = skb;
  728. bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
  729. FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  730. bdp->cbd_sc = BD_ENET_RX_EMPTY;
  731. bdp++;
  732. }
  733. /* Set the last buffer to wrap. */
  734. bdp--;
  735. bdp->cbd_sc |= BD_SC_WRAP;
  736. bdp = fep->tx_bd_base;
  737. for (i = 0; i < TX_RING_SIZE; i++) {
  738. fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
  739. bdp->cbd_sc = 0;
  740. bdp->cbd_bufaddr = 0;
  741. bdp++;
  742. }
  743. /* Set the last buffer to wrap. */
  744. bdp--;
  745. bdp->cbd_sc |= BD_SC_WRAP;
  746. return 0;
  747. }
  748. static int
  749. fec_enet_open(struct net_device *dev)
  750. {
  751. struct fec_enet_private *fep = netdev_priv(dev);
  752. int ret;
  753. /* I should reset the ring buffers here, but I don't yet know
  754. * a simple way to do that.
  755. */
  756. ret = fec_enet_alloc_buffers(dev);
  757. if (ret)
  758. return ret;
  759. /* schedule a link state check */
  760. phy_start(fep->phy_dev);
  761. netif_start_queue(dev);
  762. fep->opened = 1;
  763. return 0;
  764. }
  765. static int
  766. fec_enet_close(struct net_device *dev)
  767. {
  768. struct fec_enet_private *fep = netdev_priv(dev);
  769. /* Don't know what to do yet. */
  770. fep->opened = 0;
  771. phy_stop(fep->phy_dev);
  772. netif_stop_queue(dev);
  773. fec_stop(dev);
  774. fec_enet_free_buffers(dev);
  775. return 0;
  776. }
  777. /* Set or clear the multicast filter for this adaptor.
  778. * Skeleton taken from sunlance driver.
  779. * The CPM Ethernet implementation allows Multicast as well as individual
  780. * MAC address filtering. Some of the drivers check to make sure it is
  781. * a group multicast address, and discard those that are not. I guess I
  782. * will do the same for now, but just remove the test if you want
  783. * individual filtering as well (do the upper net layers want or support
  784. * this kind of feature?).
  785. */
  786. #define HASH_BITS 6 /* #bits in hash */
  787. #define CRC32_POLY 0xEDB88320
/* ndo_set_multicast_list: program promiscuous mode, all-multicast, or the
 * 64-bit group hash filter from the device's multicast address list.
 */
static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		/* Set the PROM bit in R_CNTRL and skip hash programming. */
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	/* Leaving promiscuous mode: clear the PROM bit. */
	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, dev) {
		/* Only support group multicast for now */
		if (!(ha->addr[0] & 1))
			continue;

		/* Calculate the CRC-32 of the MAC address, bit by bit,
		 * LSB first (standard Ethernet reflected polynomial).
		 */
		crc = 0xffffffff;
		for (i = 0; i < dev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		/* Bits 32..63 live in the HIGH register, 0..31 in LOW. */
		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
  843. /* Set a MAC change in hardware. */
  844. static int
  845. fec_set_mac_address(struct net_device *dev, void *p)
  846. {
  847. struct fec_enet_private *fep = netdev_priv(dev);
  848. struct sockaddr *addr = p;
  849. if (!is_valid_ether_addr(addr->sa_data))
  850. return -EADDRNOTAVAIL;
  851. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  852. writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
  853. (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
  854. fep->hwp + FEC_ADDR_LOW);
  855. writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
  856. fep + FEC_ADDR_HIGH);
  857. return 0;
  858. }
/* net_device_ops hookup: standard eth helpers for MTU/address validation,
 * driver-specific handlers for everything else.
 */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open = fec_enet_open,
	.ndo_stop = fec_enet_close,
	.ndo_start_xmit = fec_enet_start_xmit,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = fec_timeout,
	.ndo_set_mac_address = fec_set_mac_address,
	.ndo_do_ioctl = fec_enet_ioctl,
};
  870. /*
  871. * XXX: We need to clean up on failure exits here.
  872. *
  873. * index is only used in legacy code
  874. */
/* One-time software init: allocate the descriptor ring, read back the MAC
 * address the bootloader left in the address registers (non-M5272), set up
 * netdev ops, zero both descriptor rings, and bring the MAC up via
 * fec_restart().
 *
 * Returns 0 or -ENOMEM.  NOTE(review): on failure after this point the
 * coherent descriptor page is never freed anywhere in this file — leak to
 * confirm against the rest of the driver.
 */
static int fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors.
	 * NOTE(review): a NULL device is passed to dma_alloc_coherent();
	 * presumably this should be the platform device — confirm DMA mask
	 * handling on the target platforms.
	 */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
	GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);

	fep->index = index;
	/* base_addr was filled with the ioremap cookie in fec_probe(). */
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		/* Recover the MAC address previously programmed into the
		 * ADDR_LOW/ADDR_HIGH registers (byte 0 in the top of LOW).
		 */
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base: RX ring first, TX ring
	 * immediately after it in the same coherent page.
	 */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;
	dev->ethtool_ops = &fec_enet_ethtool_ops;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Reset the controller and program all registers (half duplex). */
	fec_restart(dev, 0);

	return 0;
}
/* This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 *
 * Resets the controller, reprograms every register from driver state,
 * drops any in-flight TX skbs, then re-enables the block and its
 * RXF/TXF interrupts.  The exact register write order below follows the
 * hardware's documented bring-up sequence — do not reorder.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base.  TX ring sits right
	 * after the RX ring in the single coherent allocation.
	 */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
	fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			/* May run from softirq context, hence the _any. */
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
}
/* Stop the controller: attempt a graceful TX stop (only meaningful with
 * link up), then hard-reset the block.  MII speed is re-programmed after
 * the reset so MDIO bus accesses keep working while the MAC is stopped.
 */
static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear outstanding MII command interrupts. */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
  1013. static int __devinit
  1014. fec_probe(struct platform_device *pdev)
  1015. {
  1016. struct fec_enet_private *fep;
  1017. struct net_device *ndev;
  1018. int i, irq, ret = 0;
  1019. struct resource *r;
  1020. r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1021. if (!r)
  1022. return -ENXIO;
  1023. r = request_mem_region(r->start, resource_size(r), pdev->name);
  1024. if (!r)
  1025. return -EBUSY;
  1026. /* Init network device */
  1027. ndev = alloc_etherdev(sizeof(struct fec_enet_private));
  1028. if (!ndev)
  1029. return -ENOMEM;
  1030. SET_NETDEV_DEV(ndev, &pdev->dev);
  1031. /* setup board info structure */
  1032. fep = netdev_priv(ndev);
  1033. memset(fep, 0, sizeof(*fep));
  1034. ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
  1035. fep->pdev = pdev;
  1036. if (!ndev->base_addr) {
  1037. ret = -ENOMEM;
  1038. goto failed_ioremap;
  1039. }
  1040. platform_set_drvdata(pdev, ndev);
  1041. /* This device has up to three irqs on some platforms */
  1042. for (i = 0; i < 3; i++) {
  1043. irq = platform_get_irq(pdev, i);
  1044. if (i && irq < 0)
  1045. break;
  1046. ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
  1047. if (ret) {
  1048. while (i >= 0) {
  1049. irq = platform_get_irq(pdev, i);
  1050. free_irq(irq, ndev);
  1051. i--;
  1052. }
  1053. goto failed_irq;
  1054. }
  1055. }
  1056. fep->clk = clk_get(&pdev->dev, "fec_clk");
  1057. if (IS_ERR(fep->clk)) {
  1058. ret = PTR_ERR(fep->clk);
  1059. goto failed_clk;
  1060. }
  1061. clk_enable(fep->clk);
  1062. ret = fec_enet_init(ndev, 0);
  1063. if (ret)
  1064. goto failed_init;
  1065. ret = fec_enet_mii_init(pdev);
  1066. if (ret)
  1067. goto failed_mii_init;
  1068. ret = register_netdev(ndev);
  1069. if (ret)
  1070. goto failed_register;
  1071. printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
  1072. "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
  1073. fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
  1074. fep->phy_dev->irq);
  1075. return 0;
  1076. failed_register:
  1077. fec_enet_mii_remove(fep);
  1078. failed_mii_init:
  1079. failed_init:
  1080. clk_disable(fep->clk);
  1081. clk_put(fep->clk);
  1082. failed_clk:
  1083. for (i = 0; i < 3; i++) {
  1084. irq = platform_get_irq(pdev, i);
  1085. if (irq > 0)
  1086. free_irq(irq, ndev);
  1087. }
  1088. failed_irq:
  1089. iounmap((void __iomem *)ndev->base_addr);
  1090. failed_ioremap:
  1091. free_netdev(ndev);
  1092. return ret;
  1093. }
  1094. static int __devexit
  1095. fec_drv_remove(struct platform_device *pdev)
  1096. {
  1097. struct net_device *ndev = platform_get_drvdata(pdev);
  1098. struct fec_enet_private *fep = netdev_priv(ndev);
  1099. platform_set_drvdata(pdev, NULL);
  1100. fec_stop(ndev);
  1101. fec_enet_mii_remove(fep);
  1102. clk_disable(fep->clk);
  1103. clk_put(fep->clk);
  1104. iounmap((void __iomem *)ndev->base_addr);
  1105. unregister_netdev(ndev);
  1106. free_netdev(ndev);
  1107. return 0;
  1108. }
  1109. static int
  1110. fec_suspend(struct platform_device *dev, pm_message_t state)
  1111. {
  1112. struct net_device *ndev = platform_get_drvdata(dev);
  1113. struct fec_enet_private *fep;
  1114. if (ndev) {
  1115. fep = netdev_priv(ndev);
  1116. if (netif_running(ndev)) {
  1117. netif_device_detach(ndev);
  1118. fec_stop(ndev);
  1119. }
  1120. }
  1121. return 0;
  1122. }
  1123. static int
  1124. fec_resume(struct platform_device *dev)
  1125. {
  1126. struct net_device *ndev = platform_get_drvdata(dev);
  1127. if (ndev) {
  1128. if (netif_running(ndev)) {
  1129. fec_enet_init(ndev, 0);
  1130. netif_device_attach(ndev);
  1131. }
  1132. }
  1133. return 0;
  1134. }
/* Platform-bus glue: binds against devices named "fec". */
static struct platform_driver fec_driver = {
	.driver = {
		.name = "fec",
		.owner = THIS_MODULE,
	},
	.probe = fec_probe,
	.remove = __devexit_p(fec_drv_remove),
	.suspend = fec_suspend,
	.resume = fec_resume,
};
  1145. static int __init
  1146. fec_enet_module_init(void)
  1147. {
  1148. printk(KERN_INFO "FEC Ethernet Driver\n");
  1149. return platform_driver_register(&fec_driver);
  1150. }
  1151. static void __exit
  1152. fec_enet_cleanup(void)
  1153. {
  1154. platform_driver_unregister(&fec_driver);
  1155. }
/* Module entry/exit points; GPL license required for the symbols used. */
module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);
MODULE_LICENSE("GPL");