tmio_mmc_dma.c

/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8
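
/*
 * Toggle the controller's DMA mode register at offset 0xd8. Per the comment
 * inside the function, this register appears to exist only on SuperH /
 * SH-Mobile variants, hence the #ifdef guard.
 */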
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
        /* Switch DMA mode on or off - SuperH specific? */
        writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}
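
/*
 * Set up a DMA read: check the scatterlist against the platform's alignment
 * constraint, fall back to PIO if it cannot be satisfied, redirect a single
 * unaligned element to the bounce page, then map, prepare and submit a
 * slave_sg descriptor on the Rx channel.
 */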
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }

        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->sg_len);
}
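
/*
 * Set up a DMA write: same alignment checks as the read path, except that a
 * single unaligned element is copied into the bounce page (via an atomic
 * kmap) before the descriptor is mapped, prepared and submitted on the Tx
 * channel.
 */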
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }

        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}
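
/* Dispatch to the Rx or Tx setup path if the corresponding channel was acquired. */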
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}
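
/*
 * Issue tasklet: look up the channel matching the current data direction
 * under the host lock, re-enable the DATAEND interrupt, and tell the
 * dmaengine to start processing its pending descriptors.
 */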
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}
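
/*
 * DMA completion tasklet: unmap the scatterlist for the direction that was
 * used and hand the finished data phase back to tmio_mmc_do_data_irq().
 */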
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}
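
/*
 * dmaengine channel filter: accept whatever DMA_SLAVE channel is offered and
 * attach the platform-supplied slave data through chan->private.
 */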
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}
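
/*
 * Acquire one Tx and one Rx DMA_SLAVE channel described by the platform
 * data, allocate the bounce page, and set up the issue and completion
 * tasklets. On failure the already-acquired channels are released, leaving
 * the driver to fall back to PIO.
 */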
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!pdata->dma)
                return;

        if (!host->chan_tx && !host->chan_rx) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_tx);
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_rx);
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ereqrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);

        return;

ebouncebuf:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}
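
/* Release both DMA channels and the bounce page, if they were acquired. */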
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}