
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
				|ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

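/*
 * Lifecycle sketch for the note above (see the prep functions below): a
 * pooled descriptor starts out with DMA_CTRL_ACK set so that atc_desc_get()
 * may hand it out again later; when a transfer is prepared, the
 * client-supplied flags are copied onto the descriptor that terminates the
 * chain ("client is in control of this ack"), so releasing that one back to
 * the free list is up to the client.
 */
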
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->txd.tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

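/*
 * The wrap-around above keeps cookies strictly positive: once the
 * per-channel counter would go negative it restarts at 1, leaving zero for
 * "not yet assigned" and the negative range for error values such as the
 * -EBUSY placeholder set by the prep functions.
 */
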
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
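	/*
	 * Note: the exact values written above do not matter much; the
	 * controller reloads SADDR, DADDR, CTRLA and CTRLB from the first
	 * hardware linked-list item once DSCR is programmed below, so this
	 * mainly gives the channel a clean, predictable starting state.
	 */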
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	/* move children to free_list */
	list_splice_init(&txd->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(chan2parent(&atchan->chan_common),
					desc->lli.daddr,
					desc->len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(chan2parent(&atchan->chan_common),
					desc->lli.daddr,
					desc->len, DMA_FROM_DEVICE);
	}
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(chan2parent(&atchan->chan_common),
					desc->lli.saddr,
					desc->len, DMA_TO_DEVICE);
		else
			dma_unmap_page(chan2parent(&atchan->chan_common),
					desc->lli.saddr,
					desc->len, DMA_TO_DEVICE);
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	/* Channel cannot be enabled here */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: channel enabled in tasklet\n");
		return;
	}

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(0, &atchan->error_status))
		atc_handle_error(atchan);
	else
		atc_advance_work(atchan);
	spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(0, &atchan->error_status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}

/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}

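/*
 * Example of the resulting behaviour (descriptors named only for
 * illustration): if a client submits two prepared chains back to back while
 * the channel is idle, the first one is handed to atc_dostart() immediately
 * and placed on active_list; the second one sits on atchan->queue until the
 * completion tasklet calls atc_advance_work()/atc_complete_all(), which
 * starts it.
 */
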
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation physical destination address
 * @src: operation physical source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

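	/*
	 * Example (illustrative numbers): copying 4096 bytes between
	 * word-aligned buffers is programmed as 1024 word-wide transfers
	 * per chunk below, while an odd source address or length makes the
	 * whole copy fall back to byte-wide transfers.
	 */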
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		async_tx_ack(&desc->txd);

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

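/*
 * Rough client-side usage sketch for the memcpy path (names and error
 * handling are illustrative, not part of this driver); src_phys and
 * dst_phys must already be bus addresses, e.g. from dma_map_single():
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
 *						   len, DMA_CTRL_ACK);
 *	tx->callback = my_memcpy_done;		(optional completion hook)
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */
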
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER;
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM;

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* last link descriptor of list is responsible for flags */
	prev->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

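/*
 * Rough client-side usage sketch for the slave path (assumed names; the
 * at_dma_slave instance normally comes from board/platform code and error
 * handling is omitted). The controller-specific parameters travel via
 * chan->private, which is also read by atc_alloc_chan_resources():
 *
 *	struct at_dma_slave *sdata;	(provides dma_dev, tx_reg/rx_reg,
 *					 reg_width, cfg, ctrla)
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	chan->private = sdata;		(usually done in the channel filter)
 *	sg_init_one(&sg, buf, len);
 *	tx = chan->device->device_prep_slave_sg(chan, &sg, 1, DMA_TO_DEVICE,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */
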
static void atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&atchan->lock);

	dma_writel(atdma, CHDR, atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	spin_unlock_bh(&atchan->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 *
 * If @done and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	enum dma_status		ret;

	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
			cookie, done ? *done : 0, used ? *used : 0);

	spin_lock_bh(&atchan->lock);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_bh(&atchan->lock);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

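/*
 * Polling sketch from the client side (illustrative only; real users
 * normally rely on the completion callback instead of busy-waiting):
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
 *		cpu_relax();
 *
 * dma_async_is_tx_complete() is the dmaengine wrapper that reaches
 * atc_is_tx_complete() through the device_is_tx_complete hook set in
 * at_dma_probe().
 */
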
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	if (!atc_chan_is_enabled(atchan)) {
		spin_lock_bh(&atchan->lock);
		atc_advance_work(atchan);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&atchan->lock);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&atchan->lock);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

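/*
 * How a slave channel typically reaches this function (sketch with assumed
 * names; the filter and the at_dma_slave instance belong to board/client
 * code, not to this driver):
 *
 *	static bool filter(struct dma_chan *chan, void *slave)
 *	{
 *		struct at_dma_slave *sl = slave;
 *
 *		if (sl->dma_dev == chan->device->dev) {
 *			chan->private = sl;
 *			return true;
 *		}
 *		return false;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter, &board_atslave);
 *
 * dma_request_channel() then calls back into atc_alloc_chan_resources(),
 * which picks up ->cfg from chan->private as shown above.
 */
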
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
	return 0;
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static struct dev_pm_ops at_dma_dev_pm_ops = {
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
module_init(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");