shdma.c

/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/dmaengine.h>

#include "shdma.h"
/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
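
/*
 * Note on the accessors above: ->base and ->chan_reg are u32 __iomem pointers,
 * so the byte offsets from the datasheet are divided by sizeof(u32) before the
 * pointer arithmetic.  For example, assuming the conventional per-channel
 * layout of SAR 0x00, DAR 0x04, TCR 0x08, CHCR 0x0c, sh_dmae_readl(sh_chan,
 * CHCR) reads from sh_chan->base + 3.  DMAOR is accessed 16 bits wide, but its
 * offset is still scaled by sizeof(u32) because chan_reg is a u32 pointer.
 */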

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
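
/*
 * How the transfer-size (TS) encoding above works, with an example: the TS
 * field may be split across two groups of CHCR bits, described by
 * ts_low_mask/ts_low_shift and ts_high_mask/ts_high_shift in the platform
 * data.  calc_xmit_shift() reassembles the index and looks it up in
 * pdata->ts_shift[], which holds log2 of the transfer unit, so e.g.
 * ts_shift[index] == 5 means 32-byte units and TCR then counts in units of
 * 32 bytes.  log2size_to_chcr() is the inverse mapping from a log2 size back
 * to the split bit pattern.  (The concrete masks and table are SoC-specific
 * and come from the platform data, not from this file.)
 */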

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}
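
/*
 * Worked example of the default CHCR composed in dmae_init(): destination and
 * source address increment (DM_INC | SM_INC), auto-request (0x400), and the TS
 * bits for 2^LOG2_DEFAULT_XFER_SIZE = 4-byte units.  Note that hw->tcr is kept
 * in bytes everywhere in this driver and is only shifted down to transfer
 * units by xmit_shift when it is written to TCR in dmae_set_reg().
 */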

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* While the DMA channel is busy, CHCR must not be changed */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
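
/*
 * DMARS layout assumed by dmae_set_dmars(): each 16-bit DMARS register holds
 * the 8-bit MID/RID request code for two channels, one per byte.  With
 * dmars_bit == 0 the low byte is updated and (0xff00 >> 0) preserves the high
 * byte; with dmars_bit == 8 the high byte is updated and the low byte is
 * preserved.  So, as an illustration, writing mid_rid 0xc1 for a channel whose
 * dmars_bit is 8 becomes (old & 0x00ff) | (0xc1 << 8).
 */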

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;

	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}
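
/*
 * Cookie/callback flow illustrated: a three-chunk descriptor enters tx_submit
 * with async_tx cookies { -EBUSY, -EINVAL, -ENOSPC }.  The loop above walks
 * the prepared chain on ld_free starting at the first chunk, marks every chunk
 * DESC_SUBMITTED, records the same positive cookie (say 42) in each chunk's
 * ->cookie, clears the per-chunk callback and finally re-attaches the user's
 * callback to the last chunk, so it fires only once, when the whole chain has
 * completed.
 */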

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
		if (!cfg)
			return -EINVAL;

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
			return -EBUSY;

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 *
 * Returns the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}
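
/*
 * Chunking example: since the controller has no hardware chain mode and TCR
 * limits a single transfer to SH_DMA_TCR_MAX + 1 bytes (16MB), a 40MB request
 * is prepared as three descriptors of 16MB, 16MB and 8MB.  Each call above
 * consumes at most SH_DMA_TCR_MAX + 1 bytes of *len and advances the
 * incrementing address(es) by the same amount.
 */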

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., a FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element, pointing at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * the first descriptor is what the user is dealing with in all API
	 * calls, its cookie is at first set to -EBUSY, at tx-submit to a
	 * positive number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}
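
/*
 * Concrete example of the cookie convention described above: a 20MB MEMCPY
 * arrives as a single SG entry and is split into two chunks.  After
 * preparation the first chunk has cookie -EBUSY (this is the descriptor
 * returned to the user) and the last one -ENOSPC; any chunks in between would
 * carry -EINVAL.  Only at tx_submit() does the first chunk get a real,
 * positive cookie.
 */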

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
			       direction, flags);
}
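
/*
 * Typical client usage (a sketch, not part of this driver): a peripheral
 * driver fills a struct sh_dmae_slave with its slave_id, stores a pointer to
 * it in chan->private (commonly from the filter function passed to
 * dma_request_channel()), and then calls device_prep_slave_sg().
 * sh_dmae_alloc_chan_resources() above has already looked up the matching
 * sh_dmae_slave_config, programmed DMARS/CHCR and set param->config, which is
 * why the unconditional param->config->addr dereference here is safe.
 */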

static void sh_dmae_terminate_all(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	if (!chan)
		return;

	sh_dmae_chan_ld_cleanup(sh_chan, true);
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not yet transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}
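
/*
 * Completion detection in the tasklet above relies on the address registers
 * pointing just past the end of the finished transfer: a submitted descriptor
 * counts as done when hw.sar + hw.tcr (or hw.dar + hw.tcr for DMA_FROM_DEVICE)
 * equals the current SAR (or DAR) value.  E.g. a chunk programmed with
 * sar = 0x0c000000 and tcr = 0x1000 is considered complete once SAR reads
 * 0x0c001000.  Note that hw.tcr is the byte count stored by
 * sh_dmae_add_desc(), not the unit count written to the TCR register.
 */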

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional; if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 *    (see the worked example below)
	 */
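	/*
	 * Example of how the rules above play out (the IRQ numbers are purely
	 * illustrative): a board that passes a single IRQ resource with
	 * start == end, say IRQ 34, ends up with the error handler and every
	 * channel sharing IRQ 34 with IRQF_SHARED.  A board that passes an
	 * error IRQ plus a second resource spanning IRQs 46..51 gets one
	 * dedicated vector per channel, requested shared only if the resource
	 * is marked IORESOURCE_IRQ_SHAREABLE.
	 */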
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_terminate_all = sh_dmae_terminate_all;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channels */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");