/*
 * Blackfin On-Chip MAC Driver
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <asm/dma.h>
#include <linux/dma-mapping.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/portmux.h>

#include "bfin_mac.h"

#define DRV_NAME	"bfin_mac"
#define DRV_VERSION	"1.1"
#define DRV_AUTHOR	"Bryan Wu, Luke Yang"
#define DRV_DESC	"Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");

#if defined(CONFIG_BFIN_MAC_USE_L1)
# define bfin_mac_alloc(dma_handle, size)	l1_data_sram_zalloc(size)
# define bfin_mac_free(dma_handle, ptr)		l1_data_sram_free(ptr)
#else
# define bfin_mac_alloc(dma_handle, size) \
	dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
# define bfin_mac_free(dma_handle, ptr) \
	dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
#endif

#define PKT_BUF_SZ	1580

#define MAX_TIMEOUT_CNT	500

/* pointers to maintain transmit list */
static struct net_dma_desc_tx *tx_list_head;
static struct net_dma_desc_tx *tx_list_tail;
static struct net_dma_desc_rx *rx_list_head;
static struct net_dma_desc_rx *rx_list_tail;
static struct net_dma_desc_rx *current_rx_ptr;
static struct net_dma_desc_tx *current_tx_ptr;
static struct net_dma_desc_tx *tx_desc;
static struct net_dma_desc_rx *rx_desc;

static void bf537mac_disable(void);
static void bf537mac_enable(void);

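/*
 * Free the TX/RX descriptor rings along with any sk_buffs still
 * attached to them.  Depending on CONFIG_BFIN_MAC_USE_L1, the rings
 * are returned either to L1 data SRAM or to the coherent DMA pool.
 */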
static void desc_list_free(void)
{
	struct net_dma_desc_rx *r;
	struct net_dma_desc_tx *t;
	int i;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	dma_addr_t dma_handle = 0;
#endif

	if (tx_desc) {
		t = tx_list_head;
		for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
			if (t) {
				if (t->skb) {
					dev_kfree_skb(t->skb);
					t->skb = NULL;
				}
				t = t->next;
			}
		}
		bfin_mac_free(dma_handle, tx_desc);
	}

	if (rx_desc) {
		r = rx_list_head;
		for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
			if (r) {
				if (r->skb) {
					dev_kfree_skb(r->skb);
					r->skb = NULL;
				}
				r = r->next;
			}
		}
		bfin_mac_free(dma_handle, rx_desc);
	}
}

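/*
 * Allocate the TX and RX descriptor rings and link them into two
 * circular lists of large-mode DMA descriptors.  Each RX slot gets a
 * pre-allocated sk_buff so the DMA engine always has a buffer to
 * write incoming frames into.
 */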
static int desc_list_init(void)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	/*
	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
	 * The real dma handle is the return value of dma_alloc_coherent().
	 */
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				 sizeof(struct net_dma_desc_tx) *
				 CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				 sizeof(struct net_dma_desc_rx) *
				 CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		/*
		 * disable DMA
		 * read from memory WNR = 0
		 * wordsize is 32 bits
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enable DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		/* allocate a new skb for next time receive */
		new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
		if (!new_skb) {
			printk(KERN_NOTICE DRV_NAME
			       ": init: low on mem - packet dropped\n");
			goto init_error;
		}
		skb_reserve(new_skb, 2);
		r->skb = new_skb;

		/*
		 * enable DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		/* since RXDWA is enabled */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enable DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * enable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
			    NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
	return -ENOMEM;
}

/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/

/* Set FER regs to MUX in Ethernet pins */
static int setup_pin_mux(int action)
{
#if defined(CONFIG_BFIN_MAC_RMII)
	u16 pin_req[] = P_RMII0;
#else
	u16 pin_req[] = P_MII0;
#endif

	if (action) {
		if (peripheral_request_list(pin_req, DRV_NAME)) {
			printk(KERN_ERR DRV_NAME
			       ": Requesting Peripherals failed\n");
			return -EFAULT;
		}
	} else
		peripheral_free_list(pin_req);

	return 0;
}

/*
 * MII operations
 */

/* Wait until the previous MDC/MDIO transaction has completed */
static void mdio_poll(void)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	/* poll the STABUSY bit */
	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
		udelay(1);
		if (timeout_cnt-- < 0) {
			printk(KERN_ERR DRV_NAME
			       ": timed out waiting for MDC/MDIO transaction to complete\n");
			break;
		}
	}
}

/* Read an off-chip register in a PHY through the MDC/MDIO port */
static int mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	mdio_poll();

	/* read mode */
	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
			       SET_REGAD((u16) regnum) |
			       STABUSY);

	mdio_poll();

	return (int) bfin_read_EMAC_STADAT();
}

/* Write an off-chip register in a PHY through the MDC/MDIO port */
static int mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
			 u16 value)
{
	mdio_poll();

	bfin_write_EMAC_STADAT((u32) value);

	/* write mode */
	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
			       SET_REGAD((u16) regnum) |
			       STAOP |
			       STABUSY);

	mdio_poll();

	return 0;
}

static int mdiobus_reset(struct mii_bus *bus)
{
	return 0;
}

static void bf537_adjust_link(struct net_device *dev)
{
	struct bf537mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
#if defined(CONFIG_BFIN_MAC_RMII)
			u32 opmode = bfin_read_EMAC_OPMODE();
			switch (phydev->speed) {
			case 10:
				opmode |= RMII_10;
				break;
			case 100:
				opmode &= ~(RMII_10);
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not 10/100!\n",
				       DRV_NAME, phydev->speed);
				break;
			}
			bfin_write_EMAC_OPMODE(opmode);
#endif

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			new_state = 1;
			lp->old_link = 1;
			netif_schedule(dev);
		}
	} else if (lp->old_link) {
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

/* MDC = 2.5 MHz */
#define MDC_CLK 2500000

static int mii_probe(struct net_device *dev)
{
	struct bf537mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	unsigned short sysctl;
	int i;
	u32 sclk, mdc_div;

	/* Enable PHY output early */
	if (!(bfin_read_VR_CTL() & PHYCLKOE))
		bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);

	sclk = get_sclk();
	mdc_div = ((sclk / MDC_CLK) / 2) - 1;

	sysctl = bfin_read_EMAC_SYSCTL();
	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
	bfin_write_EMAC_SYSCTL(sysctl);

	/* search for a connected PHY device */
	for (i = 0; i < PHY_MAX_ADDR; i++) {
		struct phy_device *const tmp_phydev = lp->mii_bus.phy_map[i];

		if (!tmp_phydev)
			continue;	/* no PHY here... */

		phydev = tmp_phydev;
		break;			/* found it */
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	if (!phydev) {
		printk(KERN_INFO "%s: no PHY device found\n",
		       dev->name);
		return -ENODEV;
	}

#if defined(CONFIG_BFIN_MAC_RMII)
	phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0,
			     PHY_INTERFACE_MODE_RMII);
#else
	phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
#endif

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_Pause | SUPPORTED_Asym_Pause
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	lp->old_link = 0;
	lp->old_speed = 0;
	lp->old_duplex = -1;
	lp->phydev = phydev;

	printk(KERN_INFO "%s: attached PHY driver [%s] "
	       "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
	       "@sclk=%dMHz)\n",
	       DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
	       MDC_CLK, mdc_div, sclk/1000000);

	return 0;
}

/**************************************************************************/
void setup_system_regs(struct net_device *dev)
{
	unsigned short sysctl;

	/*
	 * Odd word alignment for Receive Frame DMA word
	 * Configure checksum support and receive frame word alignment
	 */
	sysctl = bfin_read_EMAC_SYSCTL();
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	sysctl |= RXDWA | RXCKS;
#else
	sysctl |= RXDWA;
#endif
	bfin_write_EMAC_SYSCTL(sysctl);

	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);

	/* Initialize the TX DMA channel registers */
	bfin_write_DMA2_X_COUNT(0);
	bfin_write_DMA2_X_MODIFY(4);
	bfin_write_DMA2_Y_COUNT(0);
	bfin_write_DMA2_Y_MODIFY(0);

	/* Initialize the RX DMA channel registers */
	bfin_write_DMA1_X_COUNT(0);
	bfin_write_DMA1_X_MODIFY(4);
	bfin_write_DMA1_Y_COUNT(0);
	bfin_write_DMA1_Y_MODIFY(0);
}

static void setup_mac_addr(u8 *mac_addr)
{
	u32 addr_low = le32_to_cpu(*(__le32 *) &mac_addr[0]);
	u16 addr_hi = le16_to_cpu(*(__le16 *) &mac_addr[4]);

	/* this depends on a little-endian machine */
	bfin_write_EMAC_ADDRLO(addr_low);
	bfin_write_EMAC_ADDRHI(addr_hi);
}

static int bf537mac_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	if (netif_running(dev))
		return -EBUSY;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	setup_mac_addr(dev->dev_addr);
	return 0;
}

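/*
 * Reclaim TX descriptors that the DMA engine has finished with: free
 * their sk_buffs, clear their status words and advance tx_list_head.
 * If nothing has completed yet and the ring is about to wrap, wait
 * (bounded by MAX_TIMEOUT_CNT) for the head descriptor to be released.
 */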
static void adjust_tx_list(void)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	if (tx_list_head->status.status_word != 0
	    && current_tx_ptr != tx_list_head) {
		goto adjust_head;	/* something was released, reclaim it */
	}

	/*
	 * if nothing was released, check the wait condition:
	 * the current descriptor's next cannot be the head,
	 * otherwise the DMA will not stop where we want it to
	 */
	if (current_tx_ptr->next->next == tx_list_head) {
		while (tx_list_head->status.status_word == 0) {
			mdelay(1);
			if (tx_list_head->status.status_word != 0
			    || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
				goto adjust_head;
			}
			if (timeout_cnt-- < 0) {
				printk(KERN_ERR DRV_NAME
				       ": timed out waiting to adjust tx list head\n");
				break;
			}
		}
		if (tx_list_head->status.status_word != 0) {
			goto adjust_head;
		}
	}

	return;

adjust_head:
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		} else {
			printk(KERN_ERR DRV_NAME
			       ": no sk_buff in a transmitted frame!\n");
		}
		tx_list_head = tx_list_head->next;
	} while (tx_list_head->status.status_word != 0
		 && current_tx_ptr != tx_list_head);
	return;
}

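/*
 * Hand one outgoing frame to the TX DMA ring.  If skb->data is aligned
 * so that a 16-bit length word fits just in front of it, the skb buffer
 * is used in place; otherwise the payload is copied into the
 * descriptor's own packet buffer.  The data cache is flushed over the
 * frame before DMA is (re)started.
 */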
static int bf537mac_hard_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	unsigned int data;

	current_tx_ptr->skb = skb;

	/*
	 * Is skb->data always 16-bit aligned?
	 * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)?
	 */
	if ((((unsigned int)(skb->data)) & 0x02) == 2) {
		/* move skb->data to current_tx_ptr payload */
		data = (unsigned int)(skb->data) - 2;
		*((unsigned short *)data) = (unsigned short)(skb->len);
		current_tx_ptr->desc_a.start_addr = (unsigned long)data;
		/* this is important! */
		blackfin_dcache_flush_range(data, (data + (skb->len)) + 2);
	} else {
		*((unsigned short *)(current_tx_ptr->packet)) =
		    (unsigned short)(skb->len);
		memcpy((char *)(current_tx_ptr->packet + 2), skb->data,
		       (skb->len));
		current_tx_ptr->desc_a.start_addr =
		    (unsigned long)current_tx_ptr->packet;
		if (current_tx_ptr->status.status_word != 0)
			current_tx_ptr->status.status_word = 0;
		blackfin_dcache_flush_range(
			(unsigned int)current_tx_ptr->packet,
			(unsigned int)(current_tx_ptr->packet + skb->len) + 2);
	}

	/* enable this packet's dma */
	current_tx_ptr->desc_a.config |= DMAEN;

	/* tx dma is running, just return */
	if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
		goto out;

	/* tx dma is not running */
	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
	/* dma enabled, read from memory, size is 6 */
	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
	/* Turn on the EMAC tx */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);

out:
	adjust_tx_list();
	current_tx_ptr = current_tx_ptr->next;
	dev->trans_start = jiffies;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += (skb->len);
	return 0;
}

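/*
 * Pull one received frame off the current RX descriptor, hand it to the
 * network stack, and re-arm the descriptor with a freshly allocated
 * sk_buff so the DMA engine can keep receiving.
 */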
static void bf537mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;
	new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
	if (!new_skb) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, 2);
	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	/*
	 * Invalidate the data cache over the skb->data range when the
	 * cache is in write-back mode.  This prevents stale dirty lines
	 * from overwriting the new data written by DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	skb_put(skb, len);
	blackfin_dcache_invalidate_range((unsigned long)skb->head,
					 (unsigned long)skb->tail);

	dev->last_rx = jiffies;
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	skb->csum = current_rx_ptr->status.ip_payload_csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
out:
	return;
}

/* interrupt routine to handle rx and error signal */
static irqreturn_t bf537mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int number = 0;

get_one_packet:
	if (current_rx_ptr->status.status_word == 0) {
		/* no more new packets received */
		if (number == 0) {
			if (current_rx_ptr->next->status.status_word != 0) {
				current_rx_ptr = current_rx_ptr->next;
				goto real_rx;
			}
		}
		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
					   DMA_DONE | DMA_ERR);
		return IRQ_HANDLED;
	}

real_rx:
	bf537mac_rx(dev);
	number++;
	goto get_one_packet;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bf537mac_poll(struct net_device *dev)
{
	disable_irq(IRQ_MAC_RX);
	bf537mac_interrupt(IRQ_MAC_RX, dev);
	enable_irq(IRQ_MAC_RX);
}
#endif				/* CONFIG_NET_POLL_CONTROLLER */

static void bf537mac_disable(void)
{
	unsigned int opmode;

	opmode = bfin_read_EMAC_OPMODE();
	opmode &= (~RE);
	opmode &= (~TE);
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(opmode);
}

/*
 * Enable Interrupts, Receive, and Transmit
 */
static void bf537mac_enable(void)
{
	u32 opmode;

	pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__);

	/* Set RX DMA */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait for the MII to finish */
	mdio_poll();

	/* We enable only RX here */
	/* ASTP   : Enable Automatic Pad Stripping
	   PR     : Promiscuous Mode for test
	   PSF    : Receive frames with total length less than 64 bytes
	   FDMODE : Full Duplex Mode
	   LB     : Internal Loopback for test
	   RE     : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		opmode |= DRO | DC | PSF;
	opmode |= RE;

#if defined(CONFIG_BFIN_MAC_RMII)
	opmode |= RMII;		/* For now, only 100 Mbit is supported */
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
	opmode |= TE;
#endif
#endif
	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);
}

/* Our watchdog timed out. Called by the networking layer */
static void bf537mac_timeout(struct net_device *dev)
{
	pr_debug("%s: %s\n", dev->name, __FUNCTION__);

	bf537mac_disable();

	/* reset tx queue */
	tx_list_tail = tx_list_head->next;

	bf537mac_enable();

	/* We can accept TX packets again */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

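/*
 * Program the 64-bit multicast hash filter: hash each multicast address
 * with CRC-32 and use the top six bits of the CRC to select a bit in
 * EMAC_HASHHI/EMAC_HASHLO.
 */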
static void bf537mac_multicast_hash(struct net_device *dev)
{
	u32 emac_hashhi, emac_hashlo;
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	emac_hashhi = emac_hashlo = 0;

	for (i = 0; i < dev->mc_count; i++) {
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* skip non-multicast addresses */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc(ETH_ALEN, addrs);
		crc >>= 26;

		if (crc & 0x20)
			emac_hashhi |= 1 << (crc & 0x1f);
		else
			emac_hashlo |= 1 << (crc & 0x1f);
	}

	bfin_write_EMAC_HASHHI(emac_hashhi);
	bfin_write_EMAC_HASHLO(emac_hashlo);

	return;
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void bf537mac_set_multicast_list(struct net_device *dev)
{
	u32 sysctl;

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= RAF;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (dev->flags & IFF_ALLMULTI) {
		/* accept all multicast */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PAM;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (dev->mc_count) {
		/* set up multicast hash table */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= HM;
		bfin_write_EMAC_OPMODE(sysctl);
		bf537mac_multicast_hash(dev);
	} else {
		/* clear promisc or multicast mode */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl &= ~(RAF | PAM);
		bfin_write_EMAC_OPMODE(sysctl);
	}
}

/*
 * this puts the device in an inactive state
 */
static void bf537mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	bfin_write_DMA2_CONFIG(0x0000);
}

/*
 * Open and Initialize the interface
 *
 * Set up everything, reset the card, etc.
 */
static int bf537mac_open(struct net_device *dev)
{
	struct bf537mac_local *lp = netdev_priv(dev);
	int retval;

	pr_debug("%s: %s\n", dev->name, __FUNCTION__);

	/*
	 * Check that the address is valid.  If it's not, refuse
	 * to bring the device up.  The user must specify an
	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
	 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
		return -EINVAL;
	}

	/* initialize the rx and tx descriptor lists */
	retval = desc_list_init();
	if (retval)
		return retval;

	phy_start(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
	setup_system_regs(dev);
	bf537mac_disable();
	bf537mac_enable();
	pr_debug("hardware init finished\n");
	netif_start_queue(dev);
	netif_carrier_on(dev);

	return 0;
}

/*
 * This makes the board clean up everything that it can
 * and stop talking to the outside world.  Triggered by
 * an 'ifconfig ethX down'.
 */
static int bf537mac_close(struct net_device *dev)
{
	struct bf537mac_local *lp = netdev_priv(dev);

	pr_debug("%s: %s\n", dev->name, __FUNCTION__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	phy_stop(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bf537mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}

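/*
 * One-time hardware and netdev setup: verify the EMAC responds, claim
 * the pin mux, establish a MAC address, register the MDIO bus, probe
 * the PHY, wire up the net_device operations and grab the RX interrupt.
 */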
static int __init bf537mac_probe(struct net_device *dev)
{
	struct bf537mac_local *lp = netdev_priv(dev);
	int retval;
	int i;

	/* Grab the MAC address from the MAC */
	*(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac */
	/* todo: how to probe? which register is the revision register? */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		pr_debug("can't detect bf537 mac!\n");
		retval = -ENODEV;
		goto err_out;
	}

	/* set the GPIO pins to Ethernet mode */
	retval = setup_pin_mux(1);
	if (retval)
		return retval;

	/* Is it valid? (Did the bootloader initialize it?) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Grab the MAC from the board somehow - this is done in
		   arch/blackfin/mach-bf537/boards/eth_mac.c */
		bfin_get_ether_addr(dev->dev_addr);
	}

	/* If still not valid, get a random one */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
	}

	setup_mac_addr(dev->dev_addr);

	/* MDIO bus initialization */
	lp->mii_bus.priv = dev;
	lp->mii_bus.read = mdiobus_read;
	lp->mii_bus.write = mdiobus_write;
	lp->mii_bus.reset = mdiobus_reset;
	lp->mii_bus.name = "bfin_mac_mdio";
	snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
	lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		lp->mii_bus.irq[i] = PHY_POLL;

	mdiobus_register(&lp->mii_bus);

	retval = mii_probe(dev);
	if (retval)
		return retval;

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	dev->open = bf537mac_open;
	dev->stop = bf537mac_close;
	dev->hard_start_xmit = bf537mac_hard_start_xmit;
	dev->set_mac_address = bf537mac_set_mac_address;
	dev->tx_timeout = bf537mac_timeout;
	dev->set_multicast_list = bf537mac_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = bf537mac_poll;
#endif

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	if (request_irq
	    (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
	     "EMAC_RX", dev)) {
		printk(KERN_WARNING DRV_NAME
		       ": Unable to attach Blackfin MAC RX interrupt\n");
		return -EBUSY;
	}

	retval = register_netdev(dev);
	if (retval == 0) {
		/* now, print out the card info, in a short format.. */
		printk(KERN_INFO "%s: Version %s, %s\n",
		       DRV_NAME, DRV_VERSION, DRV_DESC);
	}

err_out:
	return retval;
}

static int bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	ndev = alloc_etherdev(sizeof(struct bf537mac_local));
	if (!ndev) {
		printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	platform_set_drvdata(pdev, ndev);

	if (bf537mac_probe(ndev) != 0) {
		platform_set_drvdata(pdev, NULL);
		free_netdev(ndev);
		printk(KERN_WARNING DRV_NAME ": not found\n");
		return -ENODEV;
	}

	return 0;
}

static int bfin_mac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);

	free_irq(IRQ_MAC_RX, ndev);

	free_netdev(ndev);

	setup_pin_mux(0);

	return 0;
}

#ifdef CONFIG_PM
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);

	if (netif_running(net_dev))
		bf537mac_close(net_dev);

	return 0;
}

static int bfin_mac_resume(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);

	if (netif_running(net_dev))
		bf537mac_open(net_dev);

	return 0;
}
#else
#define bfin_mac_suspend NULL
#define bfin_mac_resume NULL
#endif	/* CONFIG_PM */

static struct platform_driver bfin_mac_driver = {
	.probe = bfin_mac_probe,
	.remove = bfin_mac_remove,
	.resume = bfin_mac_resume,
	.suspend = bfin_mac_suspend,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init bfin_mac_init(void)
{
	return platform_driver_register(&bfin_mac_driver);
}

module_init(bfin_mac_init);

static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
}

module_exit(bfin_mac_cleanup);