dma-m2p.c 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409
  1. /*
  2. * arch/arm/mach-ep93xx/dma-m2p.c
  3. * M2P DMA handling for Cirrus EP93xx chips.
  4. *
  5. * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  6. * Copyright (C) 2006 Applied Data Systems
  7. *
  8. * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or (at
  13. * your option) any later version.
  14. */
/*
 * On the EP93xx chip the following peripherals may be allocated to the 10
 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
 *
 * I2S   contains 3 Tx and 3 Rx DMA Channels
 * AAC   contains 3 Tx and 3 Rx DMA Channels
 * UART1 contains 1 Tx and 1 Rx DMA Channels
 * UART2 contains 1 Tx and 1 Rx DMA Channels
 * UART3 contains 1 Tx and 1 Rx DMA Channels
 * IrDA  contains 1 Tx and 1 Rx DMA Channels
 *
 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
 * with this implementation.
 */
  29. #include <linux/kernel.h>
  30. #include <linux/clk.h>
  31. #include <linux/err.h>
  32. #include <linux/interrupt.h>
  33. #include <linux/module.h>
  34. #include <linux/io.h>
  35. #include <mach/dma.h>
  36. #include <mach/hardware.h>
  37. #define M2P_CONTROL 0x00
  38. #define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
  39. #define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
  40. #define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
  41. #define M2P_CONTROL_ENABLE (1 << 4)
  42. #define M2P_INTERRUPT 0x04
  43. #define M2P_INTERRUPT_STALL (1 << 0)
  44. #define M2P_INTERRUPT_NFB (1 << 1)
  45. #define M2P_INTERRUPT_ERROR (1 << 3)
  46. #define M2P_PPALLOC 0x08
  47. #define M2P_STATUS 0x0c
  48. #define M2P_REMAIN 0x14
  49. #define M2P_MAXCNT0 0x20
  50. #define M2P_BASE0 0x24
  51. #define M2P_MAXCNT1 0x30
  52. #define M2P_BASE1 0x34
  53. #define STATE_IDLE 0 /* Channel is inactive. */
  54. #define STATE_STALL 1 /* Channel is active, no buffers pending. */
  55. #define STATE_ON 2 /* Channel is active, one buffer pending. */
  56. #define STATE_NEXT 3 /* Channel is active, two buffers pending. */
/*
 * Software state for one hardware M2P channel.
 */
struct m2p_channel {
	char *name;			/* Channel name, also used as the clk_get() id. */
	void __iomem *base;		/* Base of this channel's register window. */
	int irq;			/* This channel's interrupt line. */

	struct clk *clk;		/* Per-channel clock, gated while unused. */
	spinlock_t lock;		/* Serializes submission against the IRQ handler. */

	void *client;			/* struct ep93xx_dma_m2p_client *, NULL if free. */
	unsigned next_slot:1;		/* Which MAXCNT/BASE register pair feed_buf() programs next. */
	struct ep93xx_dma_buffer *buffer_xfer;	/* Buffer currently being transferred. */
	struct ep93xx_dma_buffer *buffer_next;	/* Buffer queued in the second hardware slot. */
	struct list_head buffers_pending;	/* FIFO of buffers not yet handed to hardware. */
};
/* The five odd-numbered M2P channels handle peripheral-to-memory (Rx). */
static struct m2p_channel m2p_rx[] = {
	{"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
	{"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
	{"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
	{"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
	{"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
	{NULL},		/* Sentinel: iteration stops on a NULL ->base. */
};
/* The five even-numbered M2P channels handle memory-to-peripheral (Tx). */
static struct m2p_channel m2p_tx[] = {
	{"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
	{"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
	{"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
	{"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
	{"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
	{NULL},		/* Sentinel: iteration stops on a NULL ->base. */
};
  85. static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
  86. {
  87. if (ch->next_slot == 0) {
  88. writel(buf->size, ch->base + M2P_MAXCNT0);
  89. writel(buf->bus_addr, ch->base + M2P_BASE0);
  90. } else {
  91. writel(buf->size, ch->base + M2P_MAXCNT1);
  92. writel(buf->bus_addr, ch->base + M2P_BASE1);
  93. }
  94. ch->next_slot ^= 1;
  95. }
  96. static void choose_buffer_xfer(struct m2p_channel *ch)
  97. {
  98. struct ep93xx_dma_buffer *buf;
  99. ch->buffer_xfer = NULL;
  100. if (!list_empty(&ch->buffers_pending)) {
  101. buf = list_entry(ch->buffers_pending.next,
  102. struct ep93xx_dma_buffer, list);
  103. list_del(&buf->list);
  104. feed_buf(ch, buf);
  105. ch->buffer_xfer = buf;
  106. }
  107. }
  108. static void choose_buffer_next(struct m2p_channel *ch)
  109. {
  110. struct ep93xx_dma_buffer *buf;
  111. ch->buffer_next = NULL;
  112. if (!list_empty(&ch->buffers_pending)) {
  113. buf = list_entry(ch->buffers_pending.next,
  114. struct ep93xx_dma_buffer, list);
  115. list_del(&buf->list);
  116. feed_buf(ch, buf);
  117. ch->buffer_next = buf;
  118. }
  119. }
/* Write the channel control register with the required read-back. */
static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
{
	/*
	 * The control register must be read immediately after being written so
	 * that the internal state machine is correctly updated. See the ep93xx
	 * users' guide for details.
	 */
	writel(v, ch->base + M2P_CONTROL);
	readl(ch->base + M2P_CONTROL);
}
/*
 * Return the channel's current state (one of the STATE_* values),
 * held in bits [5:4] of the status register.
 */
static inline int m2p_channel_state(struct m2p_channel *ch)
{
	return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
}
/*
 * Per-channel interrupt handler.  Three conditions are serviced: a
 * transfer error, a stall (hardware ran out of buffers), and NFB
 * ("next frame buffer": hardware moved on to the second slot).
 */
static irqreturn_t m2p_irq(int irq, void *dev_id)
{
	struct m2p_channel *ch = dev_id;
	struct ep93xx_dma_m2p_client *cl;
	u32 irq_status, v;
	int error = 0;

	cl = ch->client;

	spin_lock(&ch->lock);
	irq_status = readl(ch->base + M2P_INTERRUPT);

	/*
	 * Acknowledge an error here; it is reported to the client via the
	 * 'error' argument of the buffer_finished() calls below.
	 */
	if (irq_status & M2P_INTERRUPT_ERROR) {
		writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
		error = 1;
	}

	/* Not our stall/NFB interrupt. */
	if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
		spin_unlock(&ch->lock);
		return IRQ_NONE;
	}

	switch (m2p_channel_state(ch)) {
	case STATE_IDLE:
		/* A stall/NFB interrupt with no buffer in flight: a bug. */
		pr_crit("m2p_irq: dma interrupt without a dma buffer\n");
		BUG();
		break;

	case STATE_STALL:
		/*
		 * Both hardware slots have drained: retire the in-flight
		 * buffer(s), then refill both slots from the pending queue.
		 */
		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
		if (ch->buffer_next != NULL) {
			cl->buffer_finished(cl->cookie, ch->buffer_next,
					    0, error);
		}
		choose_buffer_xfer(ch);
		choose_buffer_next(ch);
		if (ch->buffer_xfer != NULL)
			cl->buffer_started(cl->cookie, ch->buffer_xfer);
		break;

	case STATE_ON:
		/*
		 * First buffer done; the "next" buffer is now the one being
		 * transferred.  Promote it and refill the freed slot.
		 */
		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
		ch->buffer_xfer = ch->buffer_next;
		choose_buffer_next(ch);
		cl->buffer_started(cl->cookie, ch->buffer_xfer);
		break;

	case STATE_NEXT:
		/* Both slots still full: no stall/NFB should be pending. */
		pr_crit("m2p_irq: dma interrupt while next\n");
		BUG();
		break;
	}

	/*
	 * Re-derive the interrupt enables: STALL while any buffer is in
	 * flight, NFB while the second slot is also occupied.
	 */
	v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
					      M2P_CONTROL_NFB_IRQ_EN);
	if (ch->buffer_xfer != NULL)
		v |= M2P_CONTROL_STALL_IRQ_EN;
	if (ch->buffer_next != NULL)
		v |= M2P_CONTROL_NFB_IRQ_EN;
	m2p_set_control(ch, v);

	spin_unlock(&ch->lock);
	return IRQ_HANDLED;
}
  188. static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
  189. {
  190. struct m2p_channel *ch;
  191. int i;
  192. if (cl->flags & EP93XX_DMA_M2P_RX)
  193. ch = m2p_rx;
  194. else
  195. ch = m2p_tx;
  196. for (i = 0; ch[i].base; i++) {
  197. struct ep93xx_dma_m2p_client *client;
  198. client = ch[i].client;
  199. if (client != NULL) {
  200. int port;
  201. port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
  202. if (port == (client->flags &
  203. EP93XX_DMA_M2P_PORT_MASK)) {
  204. pr_warning("DMA channel already used by %s\n",
  205. cl->name ? : "unknown client");
  206. return ERR_PTR(-EBUSY);
  207. }
  208. }
  209. }
  210. for (i = 0; ch[i].base; i++) {
  211. if (ch[i].client == NULL)
  212. return ch + i;
  213. }
  214. pr_warning("No free DMA channel for %s\n",
  215. cl->name ? : "unknown client");
  216. return ERR_PTR(-ENODEV);
  217. }
  218. static void channel_enable(struct m2p_channel *ch)
  219. {
  220. struct ep93xx_dma_m2p_client *cl = ch->client;
  221. u32 v;
  222. clk_enable(ch->clk);
  223. v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
  224. writel(v, ch->base + M2P_PPALLOC);
  225. v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
  226. v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
  227. m2p_set_control(ch, v);
  228. }
/*
 * Gracefully shut a channel down: let the in-flight buffer drain, then
 * disable the channel and gate its clock.
 */
static void channel_disable(struct m2p_channel *ch)
{
	u32 v;

	/* Mask buffer interrupts but keep the channel running for now. */
	v = readl(ch->base + M2P_CONTROL);
	v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
	m2p_set_control(ch, v);

	/* Busy-wait for the current transfer to complete. */
	while (m2p_channel_state(ch) == STATE_ON)
		cpu_relax();

	/* Now clear the enable bit and wait for the channel to go idle. */
	m2p_set_control(ch, 0x0);

	while (m2p_channel_state(ch) == STATE_STALL)
		cpu_relax();

	clk_disable(ch->clk);
}
  242. int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
  243. {
  244. struct m2p_channel *ch;
  245. int err;
  246. ch = find_free_channel(cl);
  247. if (IS_ERR(ch))
  248. return PTR_ERR(ch);
  249. err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
  250. if (err)
  251. return err;
  252. ch->client = cl;
  253. ch->next_slot = 0;
  254. ch->buffer_xfer = NULL;
  255. ch->buffer_next = NULL;
  256. INIT_LIST_HEAD(&ch->buffers_pending);
  257. cl->channel = ch;
  258. channel_enable(ch);
  259. return 0;
  260. }
  261. EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
/**
 * ep93xx_dma_m2p_client_unregister - stop and release a client's channel.
 * @cl: client previously registered with ep93xx_dma_m2p_client_register().
 *
 * Drains the hardware, releases the interrupt line and marks the channel
 * free for reallocation.
 */
void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch = cl->channel;

	channel_disable(ch);
	free_irq(ch->irq, ch);
	ch->client = NULL;
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
/**
 * ep93xx_dma_m2p_submit - queue a buffer for DMA on a client's channel.
 * @cl:  registered client.
 * @buf: buffer to transfer; ownership passes to the driver until the
 *       client's buffer_finished() callback reports it done.
 *
 * The buffer goes into the first free hardware slot if one is available,
 * otherwise onto the software pending queue.  May be called from any
 * context that allows spin_lock_irqsave().
 */
void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
			   struct ep93xx_dma_buffer *buf)
{
	struct m2p_channel *ch = cl->channel;
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&ch->lock, flags);
	v = readl(ch->base + M2P_CONTROL);
	if (ch->buffer_xfer == NULL) {
		/* Channel idle: this buffer starts transferring now. */
		ch->buffer_xfer = buf;
		feed_buf(ch, buf);
		cl->buffer_started(cl->cookie, buf);

		/* Interrupt when this (sole) buffer completes. */
		v |= M2P_CONTROL_STALL_IRQ_EN;
		m2p_set_control(ch, v);
	} else if (ch->buffer_next == NULL) {
		/* One buffer in flight: occupy the second hardware slot. */
		ch->buffer_next = buf;
		feed_buf(ch, buf);

		/* Interrupt when the hardware switches to this buffer. */
		v |= M2P_CONTROL_NFB_IRQ_EN;
		m2p_set_control(ch, v);
	} else {
		/* Both hardware slots busy: queue in software. */
		list_add_tail(&buf->list, &ch->buffers_pending);
	}
	spin_unlock_irqrestore(&ch->lock, flags);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
/**
 * ep93xx_dma_m2p_submit_recursive - queue a buffer without taking the lock.
 * @cl:  registered client.
 * @buf: buffer to append to the channel's software pending queue.
 *
 * Appends to buffers_pending WITHOUT acquiring ch->lock, so the caller
 * must already hold it — intended for use from the client's
 * buffer_finished() callback, which m2p_irq() invokes with the channel
 * lock held.
 */
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
				     struct ep93xx_dma_buffer *buf)
{
	struct m2p_channel *ch = cl->channel;

	list_add_tail(&buf->list, &ch->buffers_pending);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
/**
 * ep93xx_dma_m2p_flush - abort all queued work on a client's channel.
 * @cl: registered client.
 *
 * Drains and restarts the channel with empty state.  Note that any
 * in-flight or pending buffers are simply forgotten — no
 * buffer_finished() callbacks are made for them.
 */
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch = cl->channel;

	channel_disable(ch);
	ch->next_slot = 0;
	ch->buffer_xfer = NULL;
	ch->buffer_next = NULL;
	INIT_LIST_HEAD(&ch->buffers_pending);
	channel_enable(ch);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
  313. static int init_channel(struct m2p_channel *ch)
  314. {
  315. ch->clk = clk_get(NULL, ch->name);
  316. if (IS_ERR(ch->clk))
  317. return PTR_ERR(ch->clk);
  318. spin_lock_init(&ch->lock);
  319. ch->client = NULL;
  320. return 0;
  321. }
  322. static int __init ep93xx_dma_m2p_init(void)
  323. {
  324. int i;
  325. int ret;
  326. for (i = 0; m2p_rx[i].base; i++) {
  327. ret = init_channel(m2p_rx + i);
  328. if (ret)
  329. return ret;
  330. }
  331. for (i = 0; m2p_tx[i].base; i++) {
  332. ret = init_channel(m2p_tx + i);
  333. if (ret)
  334. return ret;
  335. }
  336. pr_info("M2P DMA subsystem initialized\n");
  337. return 0;
  338. }
  339. arch_initcall(ep93xx_dma_m2p_init);