xilinx_ll_temac_sdma.c
/*
 * Xilinx xps_ll_temac ethernet driver for u-boot
 *
 * SDMA sub-controller
 *
 * Copyright (C) 2011 - 2012 Stephan Linz <linz@li-pro.net>
 * Copyright (C) 2008 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008 - 2011 PetaLogix
 *
 * Based on Yoshio Kashiwagi kashiwagi@co-nss.co.jp driver
 * Copyright (C) 2008 Nissin Systems Co.,Ltd.
 * March 2008 created
 *
 * CREDITS: tsec driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * [0]: http://www.xilinx.com/support/documentation
 *
 * [M]: [0]/ip_documentation/mpmc.pdf
 * [S]: [0]/ip_documentation/xps_ll_temac.pdf
 * [A]: [0]/application_notes/xapp1041.pdf
 */

#include <config.h>
#include <common.h>
#include <net.h>

#include <asm/types.h>
#include <asm/io.h>

#include "xilinx_ll_temac.h"
#include "xilinx_ll_temac_sdma.h"

#define TX_BUF_CNT	2

static unsigned int rx_idx;	/* index of the current RX buffer */
static unsigned int tx_idx;	/* index of the current TX buffer */

struct rtx_cdmac_bd {
	struct cdmac_bd rx[PKTBUFSRX];
	struct cdmac_bd tx[TX_BUF_CNT];
};

/*
 * DMA Buffer Descriptor alignment
 *
 * If the address contained in the Next Descriptor Pointer register is not
 * 8-word aligned or reaches beyond the range of available memory, the SDMA
 * halts processing and sets the CDMAC_BD_STCTRL_ERROR bit in the respective
 * status register (tx_chnl_sts or rx_chnl_sts). Eight 32-bit words are
 * 32 bytes, hence the 32-byte alignment of the descriptor table below.
 *
 * [M]: [0]/ip_documentation/mpmc.pdf
 *      page 161, Next Descriptor Pointer
 */
static struct rtx_cdmac_bd cdmac_bd __aligned(32);

#if defined(CONFIG_XILINX_440) || defined(CONFIG_XILINX_405)

/*
 * Indirect DCR access operations mi{ft}dcr_xilinx(), especially
 * for Xilinx PowerPC implementations on FPGA.
 *
 * FIXME: This part should go up to arch/powerpc -- but where?
 */
#include <asm/processor.h>

#define XILINX_INDIRECT_DCR_ADDRESS_REG	0
#define XILINX_INDIRECT_DCR_ACCESS_REG	1

inline unsigned mifdcr_xilinx(const unsigned dcrn)
{
	mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG, dcrn);
	return mfdcr(XILINX_INDIRECT_DCR_ACCESS_REG);
}

inline void mitdcr_xilinx(const unsigned dcrn, int val)
{
	mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG, dcrn);
	mtdcr(XILINX_INDIRECT_DCR_ACCESS_REG, val);
}

/* Xilinx Device Control Register (DCR) in/out accessors */
inline unsigned ll_temac_xldcr_in32(phys_addr_t addr)
{
	return mifdcr_xilinx((const unsigned)addr);
}

inline void ll_temac_xldcr_out32(phys_addr_t addr, unsigned value)
{
	mitdcr_xilinx((const unsigned)addr, value);
}

void ll_temac_collect_xldcr_sdma_reg_addr(struct eth_device *dev)
{
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t dmac_ctrl = ll_temac->ctrladdr;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	ra[TX_NXTDESC_PTR] = dmac_ctrl + TX_NXTDESC_PTR;
	ra[TX_CURBUF_ADDR] = dmac_ctrl + TX_CURBUF_ADDR;
	ra[TX_CURBUF_LENGTH] = dmac_ctrl + TX_CURBUF_LENGTH;
	ra[TX_CURDESC_PTR] = dmac_ctrl + TX_CURDESC_PTR;
	ra[TX_TAILDESC_PTR] = dmac_ctrl + TX_TAILDESC_PTR;
	ra[TX_CHNL_CTRL] = dmac_ctrl + TX_CHNL_CTRL;
	ra[TX_IRQ_REG] = dmac_ctrl + TX_IRQ_REG;
	ra[TX_CHNL_STS] = dmac_ctrl + TX_CHNL_STS;
	ra[RX_NXTDESC_PTR] = dmac_ctrl + RX_NXTDESC_PTR;
	ra[RX_CURBUF_ADDR] = dmac_ctrl + RX_CURBUF_ADDR;
	ra[RX_CURBUF_LENGTH] = dmac_ctrl + RX_CURBUF_LENGTH;
	ra[RX_CURDESC_PTR] = dmac_ctrl + RX_CURDESC_PTR;
	ra[RX_TAILDESC_PTR] = dmac_ctrl + RX_TAILDESC_PTR;
	ra[RX_CHNL_CTRL] = dmac_ctrl + RX_CHNL_CTRL;
	ra[RX_IRQ_REG] = dmac_ctrl + RX_IRQ_REG;
	ra[RX_CHNL_STS] = dmac_ctrl + RX_CHNL_STS;
	ra[DMA_CONTROL_REG] = dmac_ctrl + DMA_CONTROL_REG;
}
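
/*
 * On these DCR-attached configurations, ll_temac->ctrladdr is expected to
 * hold the base DCR number of the SDMA register block, so each collected
 * "address" is simply base plus the per-register offset and is later
 * dereferenced through the DCR accessors above via ll_temac->in32()/out32().
 */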
#endif /* CONFIG_XILINX_440 || CONFIG_XILINX_405 */

/* Xilinx Processor Local Bus (PLB) in/out accessors */
inline unsigned ll_temac_xlplb_in32(phys_addr_t addr)
{
	return in_be32((void *)addr);
}

inline void ll_temac_xlplb_out32(phys_addr_t addr, unsigned value)
{
	out_be32((void *)addr, value);
}

/* collect all register addresses for Xilinx PLB in/out accessors */
void ll_temac_collect_xlplb_sdma_reg_addr(struct eth_device *dev)
{
	struct ll_temac *ll_temac = dev->priv;
	struct sdma_ctrl *sdma_ctrl = (void *)ll_temac->ctrladdr;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	ra[TX_NXTDESC_PTR] = (phys_addr_t)&sdma_ctrl->tx_nxtdesc_ptr;
	ra[TX_CURBUF_ADDR] = (phys_addr_t)&sdma_ctrl->tx_curbuf_addr;
	ra[TX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->tx_curbuf_length;
	ra[TX_CURDESC_PTR] = (phys_addr_t)&sdma_ctrl->tx_curdesc_ptr;
	ra[TX_TAILDESC_PTR] = (phys_addr_t)&sdma_ctrl->tx_taildesc_ptr;
	ra[TX_CHNL_CTRL] = (phys_addr_t)&sdma_ctrl->tx_chnl_ctrl;
	ra[TX_IRQ_REG] = (phys_addr_t)&sdma_ctrl->tx_irq_reg;
	ra[TX_CHNL_STS] = (phys_addr_t)&sdma_ctrl->tx_chnl_sts;
	ra[RX_NXTDESC_PTR] = (phys_addr_t)&sdma_ctrl->rx_nxtdesc_ptr;
	ra[RX_CURBUF_ADDR] = (phys_addr_t)&sdma_ctrl->rx_curbuf_addr;
	ra[RX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->rx_curbuf_length;
	ra[RX_CURDESC_PTR] = (phys_addr_t)&sdma_ctrl->rx_curdesc_ptr;
	ra[RX_TAILDESC_PTR] = (phys_addr_t)&sdma_ctrl->rx_taildesc_ptr;
	ra[RX_CHNL_CTRL] = (phys_addr_t)&sdma_ctrl->rx_chnl_ctrl;
	ra[RX_IRQ_REG] = (phys_addr_t)&sdma_ctrl->rx_irq_reg;
	ra[RX_CHNL_STS] = (phys_addr_t)&sdma_ctrl->rx_chnl_sts;
	ra[DMA_CONTROL_REG] = (phys_addr_t)&sdma_ctrl->dma_control_reg;
}
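
/*
 * Whichever collect routine was used, the rest of this file touches the
 * SDMA hardware only through ll_temac->in32()/out32() on the addresses
 * stored in sdma_reg_addr[], so the descriptor handling below works
 * unchanged for DCR- and PLB-attached SDMA channels.
 */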

/* Check for TX and RX channel errors. */
static inline int ll_temac_sdma_error(struct eth_device *dev)
{
	int err;
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	err = ll_temac->in32(ra[TX_CHNL_STS]) & CHNL_STS_ERROR;
	err |= ll_temac->in32(ra[RX_CHNL_STS]) & CHNL_STS_ERROR;

	return err;
}
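
/*
 * ll_temac_recv_sdma() and ll_temac_send_sdma() call this before touching
 * the descriptor rings; if either channel reports an error, they reset the
 * whole DMA engine and rebuild the rings via ll_temac_init_sdma().
 */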

int ll_temac_init_sdma(struct eth_device *dev)
{
	struct ll_temac *ll_temac = dev->priv;
	struct cdmac_bd *rx_dp;
	struct cdmac_bd *tx_dp;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;
	int i;

	printf("%s: SDMA: %d Rx buffers, %d Tx buffers\n",
			dev->name, PKTBUFSRX, TX_BUF_CNT);

	/* Initialize the Rx buffer descriptors */
	for (i = 0; i < PKTBUFSRX; i++) {
		rx_dp = &cdmac_bd.rx[i];
		memset(rx_dp, 0, sizeof(*rx_dp));
		rx_dp->next_p = rx_dp;
		rx_dp->buf_len = PKTSIZE_ALIGN;
		rx_dp->phys_buf_p = (u8 *)NetRxPackets[i];
		flush_cache((u32)rx_dp->phys_buf_p, PKTSIZE_ALIGN);
	}
	flush_cache((u32)cdmac_bd.rx, sizeof(cdmac_bd.rx));

	/* Initialize the Tx buffer descriptors */
	for (i = 0; i < TX_BUF_CNT; i++) {
		tx_dp = &cdmac_bd.tx[i];
		memset(tx_dp, 0, sizeof(*tx_dp));
		tx_dp->next_p = tx_dp;
	}
	flush_cache((u32)cdmac_bd.tx, sizeof(cdmac_bd.tx));

	/* Reset the index counters for the Rx and Tx buffer descriptors */
	rx_idx = tx_idx = 0;

	/* initial Rx DMA start by writing to respective TAILDESC_PTR */
	ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
	ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);

	return 0;
}
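
/*
 * Each buffer descriptor is linked back to itself (next_p points at the
 * descriptor's own address), so the rings are effectively driven one
 * descriptor at a time: the receive and transmit paths below re-arm the
 * engine for every buffer by rewriting CURDESC_PTR/TAILDESC_PTR.
 */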

int ll_temac_halt_sdma(struct eth_device *dev)
{
	unsigned timeout = 50;	/* 1usec * 50 = 50usec */
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	/*
	 * Soft reset the DMA
	 *
	 * Quote from MPMC documentation: Writing a 1 to this field
	 * forces the DMA engine to shutdown and reset itself. After
	 * setting this bit, software must poll it until the bit is
	 * cleared by the DMA. This indicates that the reset process
	 * is done and the pipeline has been flushed.
	 */
	ll_temac->out32(ra[DMA_CONTROL_REG], DMA_CONTROL_RESET);
	while (timeout && (ll_temac->in32(ra[DMA_CONTROL_REG])
					& DMA_CONTROL_RESET)) {
		timeout--;
		udelay(1);
	}

	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return -1;
	}

	return 0;
}

int ll_temac_reset_sdma(struct eth_device *dev)
{
	u32 r;
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	/* Soft reset the DMA. */
	if (ll_temac_halt_sdma(dev))
		return -1;

	/* Now clear the interrupts. */
	r = ll_temac->in32(ra[TX_CHNL_CTRL]);
	r &= ~CHNL_CTRL_IRQ_MASK;
	ll_temac->out32(ra[TX_CHNL_CTRL], r);

	r = ll_temac->in32(ra[RX_CHNL_CTRL]);
	r &= ~CHNL_CTRL_IRQ_MASK;
	ll_temac->out32(ra[RX_CHNL_CTRL], r);

	/* Now ACK pending IRQs. */
	ll_temac->out32(ra[TX_IRQ_REG], IRQ_REG_IRQ_MASK);
	ll_temac->out32(ra[RX_IRQ_REG], IRQ_REG_IRQ_MASK);

	/* Set tail-ptr mode, disable errors for both channels. */
	ll_temac->out32(ra[DMA_CONTROL_REG],
			/* Enable use of tail pointer register */
			DMA_CONTROL_TPE |
			/* Disable error when 2 or 4 bit coalesce cnt overfl */
			DMA_CONTROL_RXOCEID |
			/* Disable error when 2 or 4 bit coalesce cnt overfl */
			DMA_CONTROL_TXOCEID);

	return 0;
}
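
/*
 * Tail pointer mode (DMA_CONTROL_TPE) is what lets the polling code below
 * start each transfer simply by writing the TAILDESC_PTR registers;
 * channel interrupts are masked above and never used by this driver.
 */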

int ll_temac_recv_sdma(struct eth_device *dev)
{
	int length, pb_idx;
	struct cdmac_bd *rx_dp = &cdmac_bd.rx[rx_idx];
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	if (ll_temac_sdma_error(dev)) {
		if (ll_temac_reset_sdma(dev))
			return -1;

		ll_temac_init_sdma(dev);
	}

	flush_cache((u32)rx_dp, sizeof(*rx_dp));

	if (!(rx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED))
		return 0;

	if (rx_dp->sca.stctrl & (CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP)) {
		pb_idx = rx_idx;
		length = rx_dp->sca.app[4] & CDMAC_BD_APP4_RXBYTECNT_MASK;
	} else {
		pb_idx = -1;
		length = 0;
		printf("%s: Got part of packet, unsupported (%x)\n",
				__func__, rx_dp->sca.stctrl);
	}

	/* flip the buffer */
	flush_cache((u32)rx_dp->phys_buf_p, length);

	/* reset the current descriptor */
	rx_dp->sca.stctrl = 0;
	rx_dp->sca.app[4] = 0;
	flush_cache((u32)rx_dp, sizeof(*rx_dp));

	/* Find next empty buffer descriptor, preparation for next iteration */
	rx_idx = (rx_idx + 1) % PKTBUFSRX;
	rx_dp = &cdmac_bd.rx[rx_idx];
	flush_cache((u32)rx_dp, sizeof(*rx_dp));

	/* DMA start by writing to respective TAILDESC_PTR */
	ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
	ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);

	if (length > 0 && pb_idx != -1)
		NetReceive(NetRxPackets[pb_idx], length);

	return 0;
}
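
/*
 * Receive flow, in order: the current descriptor is polled for
 * CDMAC_BD_STCTRL_COMPLETED, the frame length is taken from the
 * descriptor's application word (app[4]), the descriptor is cleared and
 * the channel is re-armed for the next buffer, and only then is the frame
 * handed to the network stack via NetReceive().
 */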

int ll_temac_send_sdma(struct eth_device *dev, volatile void *packet,
		int length)
{
	unsigned timeout = 50;	/* 1usec * 50 = 50usec */
	struct cdmac_bd *tx_dp = &cdmac_bd.tx[tx_idx];
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	if (ll_temac_sdma_error(dev)) {
		if (ll_temac_reset_sdma(dev))
			return -1;

		ll_temac_init_sdma(dev);
	}

	tx_dp->phys_buf_p = (u8 *)packet;
	tx_dp->buf_len = length;
	tx_dp->sca.stctrl = CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP |
			CDMAC_BD_STCTRL_STOP_ON_END;

	flush_cache((u32)packet, length);
	flush_cache((u32)tx_dp, sizeof(*tx_dp));

	/* DMA start by writing to respective TAILDESC_PTR */
	ll_temac->out32(ra[TX_CURDESC_PTR], (int)tx_dp);
	ll_temac->out32(ra[TX_TAILDESC_PTR], (int)tx_dp);

	/* Find next empty buffer descriptor, preparation for next iteration */
	tx_idx = (tx_idx + 1) % TX_BUF_CNT;
	tx_dp = &cdmac_bd.tx[tx_idx];

	do {
		flush_cache((u32)tx_dp, sizeof(*tx_dp));
		udelay(1);
	} while (--timeout && !(tx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED));

	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return -1;
	}

	return 0;
}
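
/*
 * Usage sketch (illustrative only; the real call sites live in the main
 * xilinx_ll_temac.c driver and may be wired differently): these helpers
 * are meant to be driven from the usual U-Boot eth_device callbacks:
 *
 *	ll_temac_reset_sdma(dev);		reset the engine to a known state
 *	ll_temac_init_sdma(dev);		build the Rx/Tx descriptor rings
 *	ll_temac_send_sdma(dev, pkt, len);	transmit one frame, polled
 *	ll_temac_recv_sdma(dev);		poll for one completed Rx frame
 *	ll_temac_halt_sdma(dev);		soft reset / stop on shutdown
 */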