dma-m2p.c 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411
  1. /*
  2. * arch/arm/mach-ep93xx/dma-m2p.c
  3. * M2P DMA handling for Cirrus EP93xx chips.
  4. *
  5. * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  6. * Copyright (C) 2006 Applied Data Systems
  7. *
  8. * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or (at
  13. * your option) any later version.
  14. */
  15. /*
 * On the EP93xx chip the following peripherals may be allocated to the 10
  17. * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
  18. *
  19. * I2S contains 3 Tx and 3 Rx DMA Channels
  20. * AAC contains 3 Tx and 3 Rx DMA Channels
  21. * UART1 contains 1 Tx and 1 Rx DMA Channels
  22. * UART2 contains 1 Tx and 1 Rx DMA Channels
  23. * UART3 contains 1 Tx and 1 Rx DMA Channels
  24. * IrDA contains 1 Tx and 1 Rx DMA Channels
  25. *
  26. * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
  27. * with this implementation.
  28. */
  29. #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
  30. #include <linux/kernel.h>
  31. #include <linux/clk.h>
  32. #include <linux/err.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/module.h>
  35. #include <linux/io.h>
  36. #include <mach/dma.h>
  37. #include <mach/hardware.h>
/* M2P channel register offsets (relative to each channel's base). */
#define M2P_CONTROL			0x00
#define M2P_CONTROL_STALL_IRQ_EN	(1 << 0)	/* irq when the channel stalls */
#define M2P_CONTROL_NFB_IRQ_EN		(1 << 1)	/* irq when a buffer slot frees up */
#define M2P_CONTROL_ERROR_IRQ_EN	(1 << 3)	/* irq on peripheral error */
#define M2P_CONTROL_ENABLE		(1 << 4)	/* channel enable */
#define M2P_INTERRUPT			0x04		/* interrupt status/ack */
#define M2P_INTERRUPT_STALL		(1 << 0)
#define M2P_INTERRUPT_NFB		(1 << 1)
#define M2P_INTERRUPT_ERROR		(1 << 3)
#define M2P_PPALLOC			0x08		/* peripheral port allocation */
#define M2P_STATUS			0x0c
#define M2P_REMAIN			0x14		/* not used by this driver */
#define M2P_MAXCNT0			0x20		/* buffer slot 0: byte count */
#define M2P_BASE0			0x24		/* buffer slot 0: bus address */
#define M2P_MAXCNT1			0x30		/* buffer slot 1: byte count */
#define M2P_BASE1			0x34		/* buffer slot 1: bus address */

/* Channel state machine values, read from M2P_STATUS bits [5:4]. */
#define STATE_IDLE	0	/* Channel is inactive. */
#define STATE_STALL	1	/* Channel is active, no buffers pending. */
#define STATE_ON	2	/* Channel is active, one buffer pending. */
#define STATE_NEXT	3	/* Channel is active, two buffers pending. */
/*
 * Per-channel driver state, one instance per hardware M2P channel.
 * ->lock serialises the buffer bookkeeping and the register accesses
 * in the submit and interrupt paths.
 */
struct m2p_channel {
	char				*name;		/* also the channel's clock name, e.g. "m2p1" */
	void __iomem			*base;		/* channel register bank base */
	int				irq;

	struct clk			*clk;
	spinlock_t			lock;
	void				*client;	/* owning ep93xx_dma_m2p_client, NULL if free */

	unsigned			next_slot:1;	/* which MAXCNT/BASE bank to program next */
	struct ep93xx_dma_buffer	*buffer_xfer;	/* buffer currently being transferred */
	struct ep93xx_dma_buffer	*buffer_next;	/* buffer queued in the other hardware slot */
	struct list_head		buffers_pending; /* software overflow queue beyond the two slots */
};
/*
 * Receive (peripheral-to-memory) channels: the odd-numbered M2P
 * channels, each with its register bank offset inside the EP93xx DMA
 * block and its dedicated interrupt.  The NULL entry terminates the
 * array for the scan loops.
 */
static struct m2p_channel m2p_rx[] = {
	{"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
	{"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
	{"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
	{"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
	{"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
	{NULL},
};
/*
 * Transmit (memory-to-peripheral) channels: the even-numbered M2P
 * channels.  NULL-terminated like m2p_rx[].
 */
static struct m2p_channel m2p_tx[] = {
	{"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
	{"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
	{"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
	{"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
	{"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
	{NULL},
};
  86. static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
  87. {
  88. if (ch->next_slot == 0) {
  89. writel(buf->size, ch->base + M2P_MAXCNT0);
  90. writel(buf->bus_addr, ch->base + M2P_BASE0);
  91. } else {
  92. writel(buf->size, ch->base + M2P_MAXCNT1);
  93. writel(buf->bus_addr, ch->base + M2P_BASE1);
  94. }
  95. ch->next_slot ^= 1;
  96. }
  97. static void choose_buffer_xfer(struct m2p_channel *ch)
  98. {
  99. struct ep93xx_dma_buffer *buf;
  100. ch->buffer_xfer = NULL;
  101. if (!list_empty(&ch->buffers_pending)) {
  102. buf = list_entry(ch->buffers_pending.next,
  103. struct ep93xx_dma_buffer, list);
  104. list_del(&buf->list);
  105. feed_buf(ch, buf);
  106. ch->buffer_xfer = buf;
  107. }
  108. }
  109. static void choose_buffer_next(struct m2p_channel *ch)
  110. {
  111. struct ep93xx_dma_buffer *buf;
  112. ch->buffer_next = NULL;
  113. if (!list_empty(&ch->buffers_pending)) {
  114. buf = list_entry(ch->buffers_pending.next,
  115. struct ep93xx_dma_buffer, list);
  116. list_del(&buf->list);
  117. feed_buf(ch, buf);
  118. ch->buffer_next = buf;
  119. }
  120. }
  121. static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
  122. {
  123. /*
  124. * The control register must be read immediately after being written so
  125. * that the internal state machine is correctly updated. See the ep93xx
  126. * users' guide for details.
  127. */
  128. writel(v, ch->base + M2P_CONTROL);
  129. readl(ch->base + M2P_CONTROL);
  130. }
  131. static inline int m2p_channel_state(struct m2p_channel *ch)
  132. {
  133. return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
  134. }
/*
 * Interrupt handler for a single M2P channel.
 *
 * Acknowledges a pending error interrupt (remembering it so it can be
 * reported through buffer_finished), then advances the driver's buffer
 * bookkeeping according to the hardware state machine:
 *
 *  STATE_STALL: both hardware slots have drained; finish the
 *    outstanding buffer(s) and restart from the pending list.
 *  STATE_ON: the current buffer finished and the hardware moved on to
 *    the "next" slot; promote buffer_next and refill the freed slot.
 *
 * Finally re-arms the STALL/NFB interrupt enables to match how many
 * buffers are now in flight.
 */
static irqreturn_t m2p_irq(int irq, void *dev_id)
{
	struct m2p_channel *ch = dev_id;
	struct ep93xx_dma_m2p_client *cl;
	u32 irq_status, v;
	int error = 0;

	cl = ch->client;

	spin_lock(&ch->lock);
	irq_status = readl(ch->base + M2P_INTERRUPT);

	/* Ack the error; its occurrence is passed on via @error below. */
	if (irq_status & M2P_INTERRUPT_ERROR) {
		writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
		error = 1;
	}

	/* Neither STALL nor NFB asserted: not a completion interrupt. */
	if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
		spin_unlock(&ch->lock);
		return IRQ_NONE;
	}

	switch (m2p_channel_state(ch)) {
	case STATE_IDLE:
		pr_crit("dma interrupt without a dma buffer\n");
		BUG();
		break;

	case STATE_STALL:
		/* Everything in flight has completed. */
		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
		if (ch->buffer_next != NULL) {
			cl->buffer_finished(cl->cookie, ch->buffer_next,
					    0, error);
		}
		choose_buffer_xfer(ch);
		choose_buffer_next(ch);
		if (ch->buffer_xfer != NULL)
			cl->buffer_started(cl->cookie, ch->buffer_xfer);
		break;

	case STATE_ON:
		/* Current buffer done; hardware switched to buffer_next. */
		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
		ch->buffer_xfer = ch->buffer_next;
		choose_buffer_next(ch);
		cl->buffer_started(cl->cookie, ch->buffer_xfer);
		break;

	case STATE_NEXT:
		/* Both slots full: no completion should have fired. */
		pr_crit("dma interrupt while next\n");
		BUG();
		break;
	}

	/* Keep STALL/NFB interrupts enabled only while buffers are queued. */
	v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
					      M2P_CONTROL_NFB_IRQ_EN);
	if (ch->buffer_xfer != NULL)
		v |= M2P_CONTROL_STALL_IRQ_EN;
	if (ch->buffer_next != NULL)
		v |= M2P_CONTROL_NFB_IRQ_EN;
	m2p_set_control(ch, v);

	spin_unlock(&ch->lock);
	return IRQ_HANDLED;
}
  189. static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
  190. {
  191. struct m2p_channel *ch;
  192. int i;
  193. if (cl->flags & EP93XX_DMA_M2P_RX)
  194. ch = m2p_rx;
  195. else
  196. ch = m2p_tx;
  197. for (i = 0; ch[i].base; i++) {
  198. struct ep93xx_dma_m2p_client *client;
  199. client = ch[i].client;
  200. if (client != NULL) {
  201. int port;
  202. port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
  203. if (port == (client->flags &
  204. EP93XX_DMA_M2P_PORT_MASK)) {
  205. pr_warning("DMA channel already used by %s\n",
  206. cl->name ? : "unknown client");
  207. return ERR_PTR(-EBUSY);
  208. }
  209. }
  210. }
  211. for (i = 0; ch[i].base; i++) {
  212. if (ch[i].client == NULL)
  213. return ch + i;
  214. }
  215. pr_warning("No free DMA channel for %s\n",
  216. cl->name ? : "unknown client");
  217. return ERR_PTR(-ENODEV);
  218. }
  219. static void channel_enable(struct m2p_channel *ch)
  220. {
  221. struct ep93xx_dma_m2p_client *cl = ch->client;
  222. u32 v;
  223. clk_enable(ch->clk);
  224. v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
  225. writel(v, ch->base + M2P_PPALLOC);
  226. v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
  227. v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
  228. m2p_set_control(ch, v);
  229. }
/*
 * Quiesce @ch: mask completion interrupts, wait for the hardware to
 * drain, then fully disable the channel and gate its clock.  The two
 * busy-wait loops follow the channel's state machine (ON -> STALL ->
 * IDLE); their ordering relative to the control writes matters.
 */
static void channel_disable(struct m2p_channel *ch)
{
	u32 v;

	/* Stop taking STALL/NFB interrupts while winding down. */
	v = readl(ch->base + M2P_CONTROL);
	v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
	m2p_set_control(ch, v);

	/* Let the current transfer run to completion... */
	while (m2p_channel_state(ch) == STATE_ON)
		cpu_relax();

	m2p_set_control(ch, 0x0);

	/* ...and wait for the channel to leave the stalled state. */
	while (m2p_channel_state(ch) == STATE_STALL)
		cpu_relax();

	clk_disable(ch->clk);
}
  243. int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
  244. {
  245. struct m2p_channel *ch;
  246. int err;
  247. ch = find_free_channel(cl);
  248. if (IS_ERR(ch))
  249. return PTR_ERR(ch);
  250. err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
  251. if (err)
  252. return err;
  253. ch->client = cl;
  254. ch->next_slot = 0;
  255. ch->buffer_xfer = NULL;
  256. ch->buffer_next = NULL;
  257. INIT_LIST_HEAD(&ch->buffers_pending);
  258. cl->channel = ch;
  259. channel_enable(ch);
  260. return 0;
  261. }
  262. EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
/*
 * Detach @cl from its channel: quiesce the hardware, release the
 * interrupt and mark the channel free for reuse.
 */
void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch = cl->channel;

	channel_disable(ch);
	free_irq(ch->irq, ch);
	ch->client = NULL;	/* channel is now claimable again */
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
/*
 * Queue @buf for DMA on @cl's channel.  If one of the two hardware
 * slots is free the buffer is programmed immediately and the matching
 * completion interrupt enabled; otherwise it joins the software
 * pending list, from which m2p_irq() feeds it later.
 */
void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
			   struct ep93xx_dma_buffer *buf)
{
	struct m2p_channel *ch = cl->channel;
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&ch->lock, flags);
	v = readl(ch->base + M2P_CONTROL);
	if (ch->buffer_xfer == NULL) {
		/* Channel idle: this buffer starts transferring now. */
		ch->buffer_xfer = buf;
		feed_buf(ch, buf);
		cl->buffer_started(cl->cookie, buf);

		v |= M2P_CONTROL_STALL_IRQ_EN;
		m2p_set_control(ch, v);

	} else if (ch->buffer_next == NULL) {
		/* One transfer in flight: park @buf in the second slot. */
		ch->buffer_next = buf;
		feed_buf(ch, buf);

		v |= M2P_CONTROL_NFB_IRQ_EN;
		m2p_set_control(ch, v);
	} else {
		/* Both hardware slots busy: defer to the pending list. */
		list_add_tail(&buf->list, &ch->buffers_pending);
	}
	spin_unlock_irqrestore(&ch->lock, flags);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
/*
 * Queue @buf on the pending list without taking ch->lock or touching
 * the hardware.
 *
 * NOTE(review): presumably intended for calls made from within a
 * client's buffer_finished() callback, which m2p_irq() invokes while
 * already holding ch->lock — confirm against callers before using it
 * from any other context.
 */
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
				     struct ep93xx_dma_buffer *buf)
{
	struct m2p_channel *ch = cl->channel;

	list_add_tail(&buf->list, &ch->buffers_pending);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
/*
 * Abort all DMA on @cl's channel: stop the hardware, drop every queued
 * buffer (no completion callbacks are made for them), then re-enable
 * the channel in an empty state.
 */
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch = cl->channel;

	channel_disable(ch);
	ch->next_slot = 0;
	ch->buffer_xfer = NULL;
	ch->buffer_next = NULL;
	INIT_LIST_HEAD(&ch->buffers_pending);
	channel_enable(ch);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
  314. static int init_channel(struct m2p_channel *ch)
  315. {
  316. ch->clk = clk_get(NULL, ch->name);
  317. if (IS_ERR(ch->clk))
  318. return PTR_ERR(ch->clk);
  319. spin_lock_init(&ch->lock);
  320. ch->client = NULL;
  321. return 0;
  322. }
  323. static int __init ep93xx_dma_m2p_init(void)
  324. {
  325. int i;
  326. int ret;
  327. for (i = 0; m2p_rx[i].base; i++) {
  328. ret = init_channel(m2p_rx + i);
  329. if (ret)
  330. return ret;
  331. }
  332. for (i = 0; m2p_tx[i].base; i++) {
  333. ret = init_channel(m2p_tx + i);
  334. if (ret)
  335. return ret;
  336. }
  337. pr_info("M2P DMA subsystem initialized\n");
  338. return 0;
  339. }
  340. arch_initcall(ep93xx_dma_m2p_init);