/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait. However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix multicast assignment.
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 *
 */
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>	/* needed for sizeof(tcphdr) */
#include <linux/udp.h>	/* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include "ll_temac.h"

#define TX_BD_NUM	64
#define RX_BD_NUM	128
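
/*
 * Note: the ring sizes are fixed at build time. The specific values look
 * like empirical throughput/memory trade-offs rather than hardware limits
 * (an assumption; the LocalLink DMA engine simply follows the descriptor
 * chains set up in temac_dma_bd_init() below).
 */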
/* ---------------------------------------------------------------------
 * Low level register access functions
 */

u32 temac_ior(struct temac_local *lp, int offset)
{
	return in_be32((u32 *)(lp->regs + offset));
}

void temac_iow(struct temac_local *lp, int offset, u32 value)
{
	out_be32((u32 *)(lp->regs + offset), value);
}
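
/*
 * Indirect register access is a three-part handshake: the target register
 * address (plus a write-enable bit for stores) goes through CTL0, the data
 * moves through LSW0, and RDY0 signals when the core has finished.
 * lp->indirect_mutex serialises the whole sequence, since only one
 * indirect access can be in flight at a time.
 */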
int temac_indirect_busywait(struct temac_local *lp)
{
	long end = jiffies + 2;

	while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
		if (end - jiffies <= 0) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}

/**
 * temac_indirect_in32 - Indirect register read
 *
 * lp->indirect_mutex must be held when calling this function
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	u32 val;

	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	val = temac_ior(lp, XTE_LSW0_OFFSET);

	return val;
}

/**
 * temac_indirect_out32 - Indirect register write
 *
 * lp->indirect_mutex must be held when calling this function
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	if (temac_indirect_busywait(lp))
		return;
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	temac_indirect_busywait(lp);
}
/**
 * temac_dma_in32 - Memory mapped DMA read, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses
 */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
	return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}

/**
 * temac_dma_out32 - Memory mapped DMA write, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses
 */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
	out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
}
/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */

#ifdef CONFIG_PPC_DCR

/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */
	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif
/**
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < RX_BD_NUM; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v, lp->tx_bd_p);
	kfree(lp->rx_skb);	/* kfree(NULL) is a no-op */
}
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb) {
		dev_err(&ndev->dev,
			"can't allocate memory for DMA RX buffer\n");
		goto out;
	}
	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev,
			"unable to allocate DMA TX buffer descriptors");
		goto out;
	}
	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev,
			"unable to allocate DMA RX buffer descriptors");
		goto out;
	}

	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}
		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					CHNL_CTRL_IRQ_EN |
					CHNL_CTRL_IRQ_DLY_EN |
					CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					CHNL_CTRL_IRQ_EN |
					CHNL_CTRL_IRQ_DLY_EN |
					CHNL_CTRL_IRQ_COAL_EN |
					CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
/* ---------------------------------------------------------------------
 * net_device_ops
 */
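
/*
 * temac_set_mac_address - program the unicast address filter.
 * May be called with a NULL address to (re)program whatever is currently
 * in ndev->dev_addr, e.g. from temac_device_reset(); falls back to a
 * random address if the current one is invalid.
 */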
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);

	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* set up the unicast MAC address filter */
	mutex_lock(&lp->indirect_mutex);
	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
			     (ndev->dev_addr[0]) |
			     (ndev->dev_addr[1] << 8) |
			     (ndev->dev_addr[2] << 16) |
			     (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1 so don't affect them.
	 * Set MAC bits [47:32] in EUAW1. */
	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
			     (ndev->dev_addr[4] & 0x000000ff) |
			     (ndev->dev_addr[5] << 8));
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}

static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	return temac_set_mac_address(ndev, addr->sa_data);
}
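
/*
 * temac_set_multicast_list - sync the hardware address filter with the
 * stack's view. Falls back to promiscuous mode when the request cannot
 * be honoured exactly (ALLMULTI/PROMISC set, or more multicast groups
 * than CAM table entries).
 */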
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw, val;
	int i;

	mutex_lock(&lp->indirect_mutex);
	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
		/*
		 * We must make the kernel realise we had to move
		 * into promisc mode or we start all out war on
		 * the cable. If it was a promisc request the
		 * flag is already set. If not we assert it.
		 */
		ndev->flags |= IFF_PROMISC;
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= MULTICAST_CAM_TABLE_NUM)
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
					     multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
					     multi_addr_lsw);
			i++;
		}
	} else {
		val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
		temac_indirect_out32(lp, XTE_AFM_OFFSET,
				     val & ~XTE_AFM_EPPRM_MASK);
		temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
	mutex_unlock(&lp->indirect_mutex);
}
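
/*
 * Table-driven option handling: each entry names an option flag, the
 * indirect register that implements it, and the bits to OR in when the
 * option is requested. temac_setoptions() below walks the table and
 * applies every entry in a single pass.
 */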
struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};
/**
 * temac_setoptions - apply the requested option flags to the hardware
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;

	mutex_lock(&lp->indirect_mutex);
	while (tp->opt) {
		reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt)
			reg |= tp->m_or;
		temac_indirect_out32(lp, tp->reg, reg);
		tp++;
	}
	lp->options |= options;
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}
/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	mutex_lock(&lp->indirect_mutex);
	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	mutex_unlock(&lp->indirect_mutex);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled. */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_set_mac_address(ndev, NULL);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	ndev->trans_start = jiffies; /* prevent tx timeout */
}
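
/*
 * temac_adjust_link - phylib callback; propagates speed changes from the
 * PHY into the TEMAC EMCFG register. Speed, duplex, and link state are
 * folded into last_link only as a cheap way to detect that something
 * changed since the previous callback.
 */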
void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	u32 mii_speed;
	int link_state;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	mutex_lock(&lp->indirect_mutex);
	if (lp->last_link != link_state) {
		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
		lp->last_link = link_state;
		phy_print_status(phy);
	}
	mutex_unlock(&lp->indirect_mutex);
}
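
/*
 * temac_start_xmit_done - reclaim completed TX descriptors. Runs from the
 * TX interrupt; walks the ring from tx_bd_ci, unmapping each buffer and
 * freeing the skb stashed in app4, until it reaches a descriptor the
 * hardware has not completed yet.
 */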
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = cur_p->app0;

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += cur_p->len;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = cur_p->app0;
	}

	netif_wake_queue(ndev);
}
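
/*
 * temac_check_tx_bd_space - return NETDEV_TX_BUSY unless the ring has a
 * free descriptor for the head fragment plus num_frag page fragments.
 * app0 == 0 marks a descriptor as free (see temac_start_xmit_done()).
 */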
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;
		tail++;
		if (tail >= TX_BD_NUM)
			tail = 0;
		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}
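
/*
 * temac_start_xmit - queue an skb on the TX descriptor ring: one
 * descriptor for the linear header (flagged SOP, carrying the
 * checksum-offload hints in app1/app2 when requested) plus one per page
 * fragment, with EOP set on the last. Writing the new tail pointer to
 * the DMA engine kicks off the transfer.
 */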
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t start_p, tail_p;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= 1; /* TX Checksum Enabled */
		cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= STS_CTRL_APP0_SOP;
	cur_p->len = skb_headlen(skb);
	/* map only the linear part here; fragments get their own mapping */
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->app4 = (unsigned long)skb;

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail++;
		if (lp->tx_bd_tail >= TX_BD_NUM)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->len = skb_frag_size(frag);
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= STS_CTRL_APP0_EOP;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= TX_BD_NUM)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}
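
/*
 * ll_temac_recv - pull completed frames off the RX ring under rx_lock,
 * hand them to the stack, and replace each consumed buffer with a fresh
 * jumbo-sized skb before advancing the tail pointer.
 */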
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	unsigned int bdstat;
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p;
	int length;
	unsigned long flags;

	spin_lock_irqsave(&lp->rx_lock, flags);

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	bdstat = cur_p->app0;
	while ((bdstat & STS_CTRL_APP0_CMPLT)) {

		skb = lp->rx_skb[lp->rx_bd_ci];
		length = cur_p->app4 & 0x3FFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == __constant_htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			skb->csum = cur_p->app3 & 0xFFFF;
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    XTE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb) {
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			spin_unlock_irqrestore(&lp->rx_lock, flags);
			return;
		}

		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     XTE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_skb[lp->rx_bd_ci] = new_skb;

		lp->rx_bd_ci++;
		if (lp->rx_bd_ci >= RX_BD_NUM)
			lp->rx_bd_ci = 0;

		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
		bdstat = cur_p->app0;
	}
	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}
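
/*
 * Interrupt handlers: both acknowledge the channel by writing the status
 * back to its IRQ register, then run the matching ring service routine on
 * coalesce/delay interrupts. The TX handler additionally reports a DMA
 * error when status bit 0x080 is set.
 */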
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & 0x080)
		dev_err(&ndev->dev, "DMA error 0x%x\n", status);

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);

	return IRQ_HANDLED;
}
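
/*
 * temac_open - bring the interface up: attach the PHY (if one was found
 * in the device tree), reset the MAC and descriptor rings, then claim
 * the TX and RX interrupts.
 */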
static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     temac_adjust_link, 0, 0);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}

		phy_start(lp->phy_dev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}
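
/*
 * temac_stop - tear down in the reverse order of temac_open(): release
 * the interrupts, detach the PHY, and free the descriptor rings.
 */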
static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "temac_close()\n");

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	temac_dma_bd_release(ndev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	if (!lp->phy_dev)
		return -EINVAL;

	return phy_mii_ioctl(lp->phy_dev, rq, cmd);
}

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};
/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
/* ethtool support */
static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct temac_local *lp = netdev_priv(ndev);

	return phy_ethtool_gset(lp->phy_dev, cmd);
}

static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct temac_local *lp = netdev_priv(ndev);

	return phy_ethtool_sset(lp->phy_dev, cmd);
}

static int temac_nway_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	return phy_start_aneg(lp->phy_dev);
}

static const struct ethtool_ops temac_ethtool_ops = {
	.get_settings = temac_get_settings,
	.set_settings = temac_set_settings,
	.nway_reset = temac_nway_reset,
	.get_link = ethtool_op_get_link,
};
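
/*
 * temac_of_probe - OF probe: map the TEMAC register window, read the
 * optional xlnx,txcsum/xlnx,rxcsum feature properties, follow the
 * llink-connected phandle to the DMA node (DCR or memory mapped) and its
 * two interrupts, fetch local-mac-address, set up the MDIO bus, and
 * register the net_device.
 */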
static int __devinit temac_of_probe(struct platform_device *op)
{
	struct device_node *np;
	struct temac_local *lp;
	struct net_device *ndev;
	const void *addr;
	__be32 *p;
	int size, rc = 0;

	/* Init network device structure */
	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev) {
		dev_err(&op->dev, "could not allocate device.\n");
		return -ENOMEM;
	}
	ether_setup(ndev);
	dev_set_drvdata(&op->dev, ndev);
	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	spin_lock_init(&lp->rx_lock);
	mutex_init(&lp->indirect_mutex);

	/* map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map temac regs.\n");
		goto nodev;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p && be32_to_cpu(*p)) {
		lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;
	}
	p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
	if (p && be32_to_cpu(*p))
		lp->temac_features |= TEMAC_FEATURE_RX_CSUM;

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		goto err_iounmap;
	}

	/* Setup the DMA register accesses, could be DCR or memory mapped */
	if (temac_dcr_setup(lp, op, np)) {

		/* no DCR in the device tree, try non-DCR */
		lp->sdma_regs = of_iomap(np, 0);
		if (lp->sdma_regs) {
			lp->dma_in = temac_dma_in32;
			lp->dma_out = temac_dma_out32;
			dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
		} else {
			dev_err(&op->dev, "unable to map DMA registers\n");
			of_node_put(np);
			goto err_iounmap;
		}
	}

	lp->rx_irq = irq_of_parse_and_map(np, 0);
	lp->tx_irq = irq_of_parse_and_map(np, 1);

	of_node_put(np); /* Finished with the DMA node; drop the reference */

	if (!lp->rx_irq || !lp->tx_irq) {
		dev_err(&op->dev, "could not determine irqs\n");
		rc = -ENOMEM;
		goto err_iounmap_2;
	}

	/* Retrieve the MAC address */
	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
	if ((!addr) || (size != 6)) {
		dev_err(&op->dev, "could not find MAC address\n");
		rc = -ENODEV;
		goto err_iounmap_2;
	}
	temac_set_mac_address(ndev, (void *)addr);

	rc = temac_mdio_setup(lp, op->dev.of_node);
	if (rc)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
	if (lp->phy_node)
		dev_dbg(lp->dev, "using PHY node %s (%p)\n",
			lp->phy_node->full_name, lp->phy_node);

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_iounmap_2;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_iounmap_2:
	if (lp->sdma_regs)
		iounmap(lp->sdma_regs);
err_iounmap:
	iounmap(lp->regs);
nodev:
	free_netdev(ndev);
	ndev = NULL;
	return rc;
}
static int __devexit temac_of_remove(struct platform_device *op)
{
	struct net_device *ndev = dev_get_drvdata(&op->dev);
	struct temac_local *lp = netdev_priv(ndev);

	temac_mdio_teardown(lp);
	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;
	dev_set_drvdata(&op->dev, NULL);
	iounmap(lp->regs);
	if (lp->sdma_regs)
		iounmap(lp->sdma_regs);
	free_netdev(ndev);
	return 0;
}
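
/*
 * Illustrative device tree fragment this driver binds against. Node and
 * property names are taken from what the probe routine actually reads;
 * the addresses, cell values, and labels below are made-up placeholders,
 * not values from any real hardware design:
 *
 *	temac: ethernet@81c00000 {
 *		compatible = "xlnx,xps-ll-temac-1.01.b";
 *		reg = <0x81c00000 0x40>;
 *		local-mac-address = [00 0a 35 00 00 01];
 *		llink-connected = <&dma>;
 *		phy-handle = <&phy0>;
 *		xlnx,txcsum = <1>;
 *		xlnx,rxcsum = <1>;
 *	};
 */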
/* Match table for of_platform binding; must not be __devinitdata since it
 * is referenced at runtime through .of_match_table */
static struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_of_driver = {
	.probe = temac_of_probe,
	.remove = __devexit_p(temac_of_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_of_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");