/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than access to TEMAC
 *   registers.  MDIO accesses are long, so threads doing them should
 *   probably sleep rather than busywait.  However, since only one
 *   indirect access can be in progress at any given time, that means that
 *   *all* indirect accesses could end up sleeping (to wait for an MDIO
 *   access to complete).  Fortunately none of the indirect accesses are
 *   on the 'hot' path for tx or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix multicast assignment.
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include "ll_temac.h"

#define TX_BD_NUM   64
#define RX_BD_NUM   128
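
/*
 * TX_BD_NUM/RX_BD_NUM size the LocalLink DMA descriptor rings.  Both rings
 * are circular: temac_dma_bd_init() links descriptor i to descriptor
 * (i + 1) % N, so the hardware simply follows the chain.
 */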

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

u32 temac_ior(struct temac_local *lp, int offset)
{
        return in_be32((u32 *)(lp->regs + offset));
}

void temac_iow(struct temac_local *lp, int offset, u32 value)
{
        out_be32((u32 *)(lp->regs + offset), value);
}

int temac_indirect_busywait(struct temac_local *lp)
{
        unsigned long end = jiffies + 2;

        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
                if (time_after_eq(jiffies, end)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
                msleep(1);
        }
        return 0;
}
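
/*
 * Indirect register access goes through the CTL0/LSW0 register pair: a read
 * waits for XTE_RDY0_HARD_ACS_RDY_MASK, writes the target register address
 * to XTE_CTL0_OFFSET, waits again, and then picks the data up from
 * XTE_LSW0_OFFSET.  A write places the data in XTE_LSW0_OFFSET first and
 * then triggers the transaction by writing the address together with
 * CNTLREG_WRITE_ENABLE_MASK to XTE_CTL0_OFFSET.  Callers must hold
 * lp->indirect_mutex so that only one transaction is in flight at a time.
 */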

/**
 * temac_indirect_in32 - Indirect register read access.
 *
 * lp->indirect_mutex must be held when calling this function
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
        u32 val;

        if (temac_indirect_busywait(lp))
                return -ETIMEDOUT;
        temac_iow(lp, XTE_CTL0_OFFSET, reg);
        if (temac_indirect_busywait(lp))
                return -ETIMEDOUT;
        val = temac_ior(lp, XTE_LSW0_OFFSET);

        return val;
}

/**
 * temac_indirect_out32 - Indirect register write access.
 *
 * lp->indirect_mutex must be held when calling this function
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
        if (temac_indirect_busywait(lp))
                return;
        temac_iow(lp, XTE_LSW0_OFFSET, value);
        temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}

/**
 * temac_dma_in32 - Memory mapped DMA read, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses
 */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
        return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}

/**
 * temac_dma_out32 - Memory mapped DMA write, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses
 */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
        out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
}

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */

#ifdef CONFIG_PPC_DCR

/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
        return dcr_read(lp->sdma_dcrs, reg);
}

/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
        dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
                           struct device_node *np)
{
        unsigned int dcrs;

        /* setup the dcr address mapping if it's in the device tree */
        dcrs = dcr_resource_start(np, 0);
        if (dcrs != 0) {
                lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
                lp->dma_in = temac_dma_dcr_in;
                lp->dma_out = temac_dma_dcr_out;
                dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
                return 0;
        }
        /* no DCR in the device tree, indicate a failure */
        return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
                           struct device_node *np)
{
        return -1;
}

#endif
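
/*
 * All DMA register traffic funnels through the lp->dma_in/lp->dma_out
 * function pointers, which the probe routine points at either the DCR
 * accessors above or the memory mapped temac_dma_in32()/temac_dma_out32()
 * variants, so the rest of the driver never needs to know which bus is
 * underneath.
 */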

/**
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        int i;

        for (i = 0; i < RX_BD_NUM; i++) {
                if (!lp->rx_skb[i])
                        break;
                dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
                                 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb(lp->rx_skb[i]);
        }
        if (lp->rx_bd_v)
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                  lp->rx_bd_v, lp->rx_bd_p);
        if (lp->tx_bd_v)
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                  lp->tx_bd_v, lp->tx_bd_p);
        kfree(lp->rx_skb);
}

/**
 * temac_dma_bd_init - Set up buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb;
        int i;

        lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
        if (!lp->rx_skb) {
                dev_err(&ndev->dev,
                        "can't allocate memory for DMA RX buffer\n");
                goto out;
        }
        /* allocate the tx and rx ring buffer descriptors. */
        /* returns a virtual address and a physical address. */
        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                         &lp->tx_bd_p, GFP_KERNEL);
        if (!lp->tx_bd_v) {
                dev_err(&ndev->dev,
                        "unable to allocate DMA TX buffer descriptors");
                goto out;
        }
        lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                         &lp->rx_bd_p, GFP_KERNEL);
        if (!lp->rx_bd_v) {
                dev_err(&ndev->dev,
                        "unable to allocate DMA RX buffer descriptors");
                goto out;
        }

        /* Link each descriptor's 'next' pointer to its neighbour so the
         * rings wrap around. */
        memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
        for (i = 0; i < TX_BD_NUM; i++) {
                lp->tx_bd_v[i].next = lp->tx_bd_p +
                                sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
        }

        memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
        for (i = 0; i < RX_BD_NUM; i++) {
                lp->rx_bd_v[i].next = lp->rx_bd_p +
                                sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

                skb = netdev_alloc_skb_ip_align(ndev,
                                                XTE_MAX_JUMBO_FRAME_SIZE);
                if (!skb) {
                        dev_err(&ndev->dev, "alloc_skb error %d\n", i);
                        goto out;
                }
                lp->rx_skb[i] = skb;
                /* returns physical address of skb->data */
                lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
                                                     skb->data,
                                                     XTE_MAX_JUMBO_FRAME_SIZE,
                                                     DMA_FROM_DEVICE);
                lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
                lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
        }

        /* Enable channel interrupts with delay and coalescing; the raw
         * numeric parts of these control words are carried over from the
         * original driver (composed values noted in the comments below). */
        lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
                                      CHNL_CTRL_IRQ_EN |
                                      CHNL_CTRL_IRQ_DLY_EN |
                                      CHNL_CTRL_IRQ_COAL_EN);
        /* 0x10220483 */
        /* 0x00100483 */
        lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
                                      CHNL_CTRL_IRQ_EN |
                                      CHNL_CTRL_IRQ_DLY_EN |
                                      CHNL_CTRL_IRQ_COAL_EN |
                                      CHNL_CTRL_IRQ_IOE);
        /* 0xff010283 */

        lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
        lp->dma_out(lp, RX_TAILDESC_PTR,
                    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

        return 0;

out:
        temac_dma_bd_release(ndev);
        return -ENOMEM;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static int temac_set_mac_address(struct net_device *ndev, void *address)
{
        struct temac_local *lp = netdev_priv(ndev);

        if (address)
                memcpy(ndev->dev_addr, address, ETH_ALEN);

        if (!is_valid_ether_addr(ndev->dev_addr))
                random_ether_addr(ndev->dev_addr);

        /* Set up the unicast MAC address filter with the given address */
        mutex_lock(&lp->indirect_mutex);
        temac_indirect_out32(lp, XTE_UAW0_OFFSET,
                             (ndev->dev_addr[0]) |
                             (ndev->dev_addr[1] << 8) |
                             (ndev->dev_addr[2] << 16) |
                             (ndev->dev_addr[3] << 24));
        /* Set MAC bits [47:32] in EUAW1; there are reserved bits in
         * EUAW1 so don't touch them */
        temac_indirect_out32(lp, XTE_UAW1_OFFSET,
                             (ndev->dev_addr[4] & 0x000000ff) |
                             (ndev->dev_addr[5] << 8));
        mutex_unlock(&lp->indirect_mutex);

        return 0;
}

static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        return temac_set_mac_address(ndev, addr->sa_data);
}

static void temac_set_multicast_list(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 multi_addr_msw, multi_addr_lsw, val;
        int i;

        mutex_lock(&lp->indirect_mutex);
        if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
            netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
                /*
                 * We must make the kernel realise we had to move
                 * into promiscuous mode or we start all out war on
                 * the cable. If it was a promiscuous request the
                 * flag is already set. If not we assert it.
                 */
                ndev->flags |= IFF_PROMISC;
                temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
                dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
        } else if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;

                i = 0;
                netdev_for_each_mc_addr(ha, ndev) {
                        if (i >= MULTICAST_CAM_TABLE_NUM)
                                break;
                        multi_addr_msw = ((ha->addr[3] << 24) |
                                          (ha->addr[2] << 16) |
                                          (ha->addr[1] << 8) |
                                          (ha->addr[0]));
                        temac_indirect_out32(lp, XTE_MAW0_OFFSET,
                                             multi_addr_msw);
                        multi_addr_lsw = ((ha->addr[5] << 8) |
                                          (ha->addr[4]) | (i << 16));
                        temac_indirect_out32(lp, XTE_MAW1_OFFSET,
                                             multi_addr_lsw);
                        i++;
                }
        } else {
                val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
                temac_indirect_out32(lp, XTE_AFM_OFFSET,
                                     val & ~XTE_AFM_EPPRM_MASK);
                temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
                temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
                dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
        }
        mutex_unlock(&lp->indirect_mutex);
}

struct temac_option {
        int flg;
        u32 opt;
        u32 reg;
        u32 m_or;
        u32 m_and;
} temac_options[] = {
        /* Turn on jumbo packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXJMBO_MASK,
        },
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXJMBO_MASK,
        },
        /* Turn on VLAN packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXVLAN_MASK,
        },
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXVLAN_MASK,
        },
        /* Turn on FCS stripping on receive packets */
        {
                .opt = XTE_OPTION_FCS_STRIP,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXFCS_MASK,
        },
        /* Turn on FCS insertion on transmit packets */
        {
                .opt = XTE_OPTION_FCS_INSERT,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXFCS_MASK,
        },
        /* Turn on length/type field checking on receive packets */
        {
                .opt = XTE_OPTION_LENTYPE_ERR,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXLT_MASK,
        },
        /* Turn on flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_RXFLO_MASK,
        },
        /* Turn on flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_TXFLO_MASK,
        },
        /* Turn on promiscuous frame filtering (all frames are received) */
        {
                .opt = XTE_OPTION_PROMISC,
                .reg = XTE_AFM_OFFSET,
                .m_or = XTE_AFM_EPPRM_MASK,
        },
        /* Enable transmitter if not already enabled */
        {
                .opt = XTE_OPTION_TXEN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXEN_MASK,
        },
        /* Enable receiver if not already enabled */
        {
                .opt = XTE_OPTION_RXEN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXEN_MASK,
        },
        {}
};
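
/*
 * Each entry above pairs an option flag with the control register and mask
 * that implement it.  temac_setoptions() walks the table and performs a
 * read-modify-write on each register: the mask bits are cleared first and
 * then OR-ed back in only when the corresponding option is requested, i.e.
 *
 *        reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
 *        if (options & tp->opt)
 *                reg |= tp->m_or;
 */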

/**
 * temac_setoptions - Apply the option flags in 'options' to the hardware
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct temac_option *tp = &temac_options[0];
        int reg;

        mutex_lock(&lp->indirect_mutex);
        while (tp->opt) {
                reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
                if (options & tp->opt)
                        reg |= tp->m_or;
                temac_indirect_out32(lp, tp->reg, reg);
                tp++;
        }
        lp->options |= options;
        mutex_unlock(&lp->indirect_mutex);

        return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 timeout;
        u32 val;

        /* Perform a software reset */

        /* 0x300 host enable bit ? */
        /* reset PHY through control register ?:1 */

        dev_dbg(&ndev->dev, "%s()\n", __func__);

        mutex_lock(&lp->indirect_mutex);
        /* Reset the receiver and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset RX reset timeout!!\n");
                        break;
                }
        }

        /* Reset the transmitter and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset TX reset timeout!!\n");
                        break;
                }
        }

        /* Disable the receiver */
        val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

        /* Reset Local Link (DMA) */
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
        timeout = 1000;
        while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset DMA reset timeout!!\n");
                        break;
                }
        }
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

        if (temac_dma_bd_init(ndev)) {
                dev_err(&ndev->dev,
                        "temac_device_reset descriptor allocation failed\n");
        }

        temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
        temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
        temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
        mutex_unlock(&lp->indirect_mutex);

        /* Sync default options with HW
         * but leave receiver and transmitter disabled. */
        temac_setoptions(ndev,
                         lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

        temac_set_mac_address(ndev, NULL);

        /* Set address filter table */
        temac_set_multicast_list(ndev);
        if (temac_setoptions(ndev, lp->options))
                dev_err(&ndev->dev, "Error setting TEMAC options\n");

        /* Init Driver variable */
        ndev->trans_start = jiffies; /* prevent tx timeout */
}
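
/*
 * temac_adjust_link - phylib callback, invoked whenever the PHY reports a
 * change.  Speed, duplex and link state are packed into one value so a
 * single compare against lp->last_link tells us whether anything changed;
 * only then is the link speed field in XTE_EMCFG_OFFSET rewritten.
 */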
void temac_adjust_link(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phy = lp->phy_dev;
        u32 mii_speed;
        int link_state;

        /* hash together the state values to decide if something has changed */
        link_state = phy->speed | (phy->duplex << 1) | phy->link;

        mutex_lock(&lp->indirect_mutex);
        if (lp->last_link != link_state) {
                mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
                mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

                switch (phy->speed) {
                case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
                case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
                case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
                }

                /* Write new speed setting out to TEMAC */
                temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
                lp->last_link = link_state;
                phy_print_status(phy);
        }
        mutex_unlock(&lp->indirect_mutex);
}

static void temac_start_xmit_done(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        unsigned int stat = 0;

        cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
        stat = cur_p->app0;

        while (stat & STS_CTRL_APP0_CMPLT) {
                dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
                                 DMA_TO_DEVICE);
                /* app4 carries the skb pointer stashed by temac_start_xmit() */
                if (cur_p->app4)
                        dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += cur_p->len;

                lp->tx_bd_ci++;
                if (lp->tx_bd_ci >= TX_BD_NUM)
                        lp->tx_bd_ci = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
                stat = cur_p->app0;
        }

        netif_wake_queue(ndev);
}

static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
        struct cdmac_bd *cur_p;
        int tail;

        tail = lp->tx_bd_tail;
        cur_p = &lp->tx_bd_v[tail];

        /* A frame needs num_frag + 1 descriptors (head plus fragments);
         * a non-zero app0 means the descriptor is still owned by hardware. */
        do {
                if (cur_p->app0)
                        return NETDEV_TX_BUSY;
                tail++;
                if (tail >= TX_BD_NUM)
                        tail = 0;

                cur_p = &lp->tx_bd_v[tail];
                num_frag--;
        } while (num_frag >= 0);

        return 0;
}
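
/*
 * Transmit path: the skb head goes into the descriptor at tx_bd_tail with
 * STS_CTRL_APP0_SOP set, each page fragment takes one further descriptor,
 * and the last one gets STS_CTRL_APP0_EOP.  Writing the new tail address to
 * TX_TAILDESC_PTR is what hands the chain to the DMA engine.
 */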
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        dma_addr_t start_p, tail_p;
        int ii;
        unsigned long num_frag;
        skb_frag_t *frag;

        num_frag = skb_shinfo(skb)->nr_frags;
        frag = &skb_shinfo(skb)->frags[0];
        start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

        if (temac_check_tx_bd_space(lp, num_frag)) {
                if (!netif_queue_stopped(ndev))
                        netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        cur_p->app0 = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned int csum_start_off = skb_checksum_start_offset(skb);
                unsigned int csum_index_off = csum_start_off + skb->csum_offset;

                cur_p->app0 |= 1; /* TX Checksum Enabled */
                cur_p->app1 = (csum_start_off << 16) | csum_index_off;
                cur_p->app2 = 0;  /* initial checksum seed */
        }

        cur_p->app0 |= STS_CTRL_APP0_SOP;
        cur_p->len = skb_headlen(skb);
        /* map only the linear head; fragments are mapped below */
        cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
        cur_p->app4 = (unsigned long)skb;

        for (ii = 0; ii < num_frag; ii++) {
                lp->tx_bd_tail++;
                if (lp->tx_bd_tail >= TX_BD_NUM)
                        lp->tx_bd_tail = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                cur_p->phys = dma_map_single(ndev->dev.parent,
                                             skb_frag_address(frag),
                                             skb_frag_size(frag),
                                             DMA_TO_DEVICE);
                cur_p->len = skb_frag_size(frag);
                cur_p->app0 = 0;
                frag++;
        }
        cur_p->app0 |= STS_CTRL_APP0_EOP;

        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        lp->tx_bd_tail++;
        if (lp->tx_bd_tail >= TX_BD_NUM)
                lp->tx_bd_tail = 0;

        skb_tx_timestamp(skb);

        /* Kick off the transfer */
        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

        return NETDEV_TX_OK;
}

static void ll_temac_recv(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb, *new_skb;
        unsigned int bdstat;
        struct cdmac_bd *cur_p;
        dma_addr_t tail_p;
        int length;
        unsigned long flags;

        spin_lock_irqsave(&lp->rx_lock, flags);

        tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
        cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

        bdstat = cur_p->app0;
        while ((bdstat & STS_CTRL_APP0_CMPLT)) {

                skb = lp->rx_skb[lp->rx_bd_ci];
                /* received frame length lives in the low bits of app4 */
                length = cur_p->app4 & 0x3FFF;

                dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
                                 DMA_FROM_DEVICE);

                skb_put(skb, length);
                skb->dev = ndev;
                skb->protocol = eth_type_trans(skb, ndev);
                skb_checksum_none_assert(skb);

                /* if we're doing rx csum offload, set it up */
                if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
                    (skb->protocol == htons(ETH_P_IP)) &&
                    (skb->len > 64)) {

                        skb->csum = cur_p->app3 & 0xFFFF;
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }

                if (!skb_defer_rx_timestamp(skb))
                        netif_rx(skb);

                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += length;

                new_skb = netdev_alloc_skb_ip_align(ndev,
                                                    XTE_MAX_JUMBO_FRAME_SIZE);
                if (!new_skb) {
                        dev_err(&ndev->dev, "no memory for new sk_buff\n");
                        spin_unlock_irqrestore(&lp->rx_lock, flags);
                        return;
                }

                cur_p->app0 = STS_CTRL_APP0_IRQONEND;
                cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
                                             XTE_MAX_JUMBO_FRAME_SIZE,
                                             DMA_FROM_DEVICE);
                cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
                lp->rx_skb[lp->rx_bd_ci] = new_skb;

                lp->rx_bd_ci++;
                if (lp->rx_bd_ci >= RX_BD_NUM)
                        lp->rx_bd_ci = 0;

                cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
                bdstat = cur_p->app0;
        }
        lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

        spin_unlock_irqrestore(&lp->rx_lock, flags);
}
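
/*
 * Interrupt handling: each LocalLink channel has its own IRQ.  A handler
 * reads the channel's IRQ status register and writes the value straight
 * back to acknowledge it, then runs the completion path
 * (temac_start_xmit_done() or ll_temac_recv()) when the coalesce/delay
 * bits are set.
 */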
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        status = lp->dma_in(lp, TX_IRQ_REG);
        lp->dma_out(lp, TX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                temac_start_xmit_done(lp->ndev);
        if (status & 0x080)
                dev_err(&ndev->dev, "DMA error 0x%x\n", status);

        return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        /* Read and clear the status registers */
        status = lp->dma_in(lp, RX_IRQ_REG);
        lp->dma_out(lp, RX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                ll_temac_recv(lp->ndev);

        return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        int rc;

        dev_dbg(&ndev->dev, "temac_open()\n");

        if (lp->phy_node) {
                lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
                                             temac_adjust_link, 0, 0);
                if (!lp->phy_dev) {
                        dev_err(lp->dev, "of_phy_connect() failed\n");
                        return -ENODEV;
                }

                phy_start(lp->phy_dev);
        }

        rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_tx_irq;
        rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_rx_irq;

        temac_device_reset(ndev);
        return 0;

err_rx_irq:
        free_irq(lp->tx_irq, ndev);
err_tx_irq:
        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;
        dev_err(lp->dev, "request_irq() failed\n");
        return rc;
}

static int temac_stop(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        dev_dbg(&ndev->dev, "temac_close()\n");

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;

        temac_dma_bd_release(ndev);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);

        /* The irq argument is unused by the handlers; run both paths */
        ll_temac_rx_irq(lp->rx_irq, ndev);
        ll_temac_tx_irq(lp->tx_irq, ndev);

        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        .ndo_set_mac_address = netdev_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct temac_local *lp = netdev_priv(ndev);
        int i, len = 0;

        for (i = 0; i < 0x11; i++)
                len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
                               (i % 8) == 7 ? "\n" : " ");
        len += sprintf(buf + len, "\n");

        return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
        &dev_attr_llink_regs.attr,
        NULL,
};

static const struct attribute_group temac_attr_group = {
        .attrs = temac_device_attrs,
};

/* ethtool support */
static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
        struct temac_local *lp = netdev_priv(ndev);

        return phy_ethtool_gset(lp->phy_dev, cmd);
}

static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
        struct temac_local *lp = netdev_priv(ndev);

        return phy_ethtool_sset(lp->phy_dev, cmd);
}

static int temac_nway_reset(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        return phy_start_aneg(lp->phy_dev);
}

static const struct ethtool_ops temac_ethtool_ops = {
        .get_settings = temac_get_settings,
        .set_settings = temac_set_settings,
        .nway_reset = temac_nway_reset,
        .get_link = ethtool_op_get_link,
};

static int __devinit temac_of_probe(struct platform_device *op)
{
        struct device_node *np;
        struct temac_local *lp;
        struct net_device *ndev;
        const void *addr;
        __be32 *p;
        int size, rc = 0;

        /* Init network device structure */
        ndev = alloc_etherdev(sizeof(*lp));
        if (!ndev) {
                dev_err(&op->dev, "could not allocate device.\n");
                return -ENOMEM;
        }
        ether_setup(ndev);
        dev_set_drvdata(&op->dev, ndev);
        SET_NETDEV_DEV(ndev, &op->dev);
        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
        ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
        ndev->netdev_ops = &temac_netdev_ops;
        ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
        ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
        ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
        ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
        ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
        ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
        ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
        ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
        ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
        ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
        ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
        ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

        /* setup temac private info structure */
        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &op->dev;
        lp->options = XTE_OPTION_DEFAULTS;
        spin_lock_init(&lp->rx_lock);
        mutex_init(&lp->indirect_mutex);

        /* map device registers */
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map temac regs.\n");
                rc = -ENOMEM;
                goto nodev;
        }

        /* Setup checksum offload, but default to off if not specified */
        lp->temac_features = 0;
        p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
        if (p && be32_to_cpu(*p)) {
                lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                /* Can checksum TCP/UDP over IPv4. */
                ndev->features |= NETIF_F_IP_CSUM;
        }
        p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
        if (p && be32_to_cpu(*p))
                lp->temac_features |= TEMAC_FEATURE_RX_CSUM;

        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
                rc = -ENODEV;
                goto err_iounmap;
        }

        /* Setup the DMA register accesses, could be DCR or memory mapped */
        if (temac_dcr_setup(lp, op, np)) {
                /* no DCR in the device tree, try non-DCR */
                lp->sdma_regs = of_iomap(np, 0);
                if (lp->sdma_regs) {
                        lp->dma_in = temac_dma_in32;
                        lp->dma_out = temac_dma_out32;
                        dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
                } else {
                        dev_err(&op->dev, "unable to map DMA registers\n");
                        of_node_put(np);
                        rc = -ENOMEM;
                        goto err_iounmap;
                }
        }

        lp->rx_irq = irq_of_parse_and_map(np, 0);
        lp->tx_irq = irq_of_parse_and_map(np, 1);

        of_node_put(np); /* Finished with the DMA node; drop the reference */

        if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
                dev_err(&op->dev, "could not determine irqs\n");
                rc = -ENOMEM;
                goto err_iounmap_2;
        }

        /* Retrieve the MAC address */
        addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
        if ((!addr) || (size != 6)) {
                dev_err(&op->dev, "could not find MAC address\n");
                rc = -ENODEV;
                goto err_iounmap_2;
        }
        temac_set_mac_address(ndev, (void *)addr);

        rc = temac_mdio_setup(lp, op->dev.of_node);
        if (rc)
                dev_warn(&op->dev, "error registering MDIO bus\n");

        lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
        if (lp->phy_node)
                dev_dbg(lp->dev, "using PHY node %s (%p)\n",
                        lp->phy_node->full_name, lp->phy_node);

        /* Add the device attributes */
        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
        if (rc) {
                dev_err(lp->dev, "Error creating sysfs files\n");
                goto err_iounmap_2;
        }

        rc = register_netdev(lp->ndev);
        if (rc) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
                goto err_register_ndev;
        }

        return 0;

err_register_ndev:
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_iounmap_2:
        if (lp->sdma_regs)
                iounmap(lp->sdma_regs);
err_iounmap:
        iounmap(lp->regs);
nodev:
        free_netdev(ndev);
        ndev = NULL;
        return rc;
}

static int __devexit temac_of_remove(struct platform_device *op)
{
        struct net_device *ndev = dev_get_drvdata(&op->dev);
        struct temac_local *lp = netdev_priv(ndev);

        temac_mdio_teardown(lp);
        unregister_netdev(ndev);
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
        dev_set_drvdata(&op->dev, NULL);
        iounmap(lp->regs);
        if (lp->sdma_regs)
                iounmap(lp->sdma_regs);
        free_netdev(ndev);

        return 0;
}

static struct of_device_id temac_of_match[] __devinitdata = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
        { .compatible = "xlnx,xps-ll-temac-2.03.a", },
        {},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
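
/*
 * For reference, the probe routine above expects a device tree node along
 * these lines (an illustrative sketch only; the exact addresses, IRQs and
 * LocalLink DMA node layout depend on the FPGA design):
 *
 *      ethernet@81c00000 {
 *              compatible = "xlnx,xps-ll-temac-1.01.b";
 *              reg = <0x81c00000 0x40>;
 *              local-mac-address = [ 00 0a 35 00 00 00 ];
 *              xlnx,txcsum = <0x1>;
 *              xlnx,rxcsum = <0x1>;
 *              llink-connected = <&dma_node>;
 *              phy-handle = <&phy0>;
 *      };
 */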

static struct platform_driver temac_of_driver = {
        .probe = temac_of_probe,
        .remove = __devexit_p(temac_of_remove),
        .driver = {
                .owner = THIS_MODULE,
                .name = "xilinx_temac",
                .of_match_table = temac_of_match,
        },
};

static int __init temac_init(void)
{
        return platform_driver_register(&temac_of_driver);
}
module_init(temac_init);

static void __exit temac_exit(void)
{
        platform_driver_unregister(&temac_of_driver);
}
module_exit(temac_exit);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");