/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;

		callback = desc->txd.callback;
		param = desc->txd.callback_param;
		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid, &desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

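/*
 * A minimal client-side sketch (not part of this driver) of how a slave
 * driver might use pl330_filter to grab a channel. Here "peri_id" is a
 * hypothetical peripheral request-line number taken from platform data;
 * the filter compares it against chan->private:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 *	if (!chan)
 *		return -EBUSY;
 */
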
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_TO_DEVICE) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_FROM_DEVICE) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
		return -ENXIO;
	}

	return 0;
}

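/*
 * For reference, a minimal sketch of the client side of DMA_SLAVE_CONFIG,
 * assuming a hypothetical peripheral with a 32-bit TX FIFO at "fifo_phys".
 * The dmaengine_slave_config() helper routes the request to this driver's
 * device_control hook:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
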
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last descriptor of the circular list from prep_xxx,
 * so the argument to submit corresponds to the last descriptor of the
 * list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than those that
	 * can be programmed with 256 bytes of MC buffer, but considering
	 * a req size is seldom going to be word-unaligned and more than
	 * 200MB, we take it easy.
	 * Also, should the limit be reached we'd rather have the platform
	 * increase MC buffer size than complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

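/*
 * A worked example of the arithmetic above, for a hypothetical PL330
 * configuration with a 64-bit data bus, a 16-deep data buffer and
 * brst_size = 2 (i.e. 4-byte beats):
 *
 *	burst_len = (64 / 8) * 16 >> 2 = 32, clamped to 16
 *
 * Then, for len = 4096 bytes, 4096 % (16 << 2) == 0 on the first pass,
 * so the loop keeps burst_len = 16.
 */
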
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_data_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_FROM_DEVICE:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

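/*
 * A minimal sketch of the client side of a cyclic transfer (e.g. an
 * audio ring buffer), assuming "buf" is a DMA-mapped buffer of "len"
 * bytes split into equal periods of "period_len" bytes; the callback
 * (a hypothetical per-period handler) fires once per period:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf, len,
 *					period_len, DMA_TO_DEVICE);
 *	txd->callback = period_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
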
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

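/*
 * A minimal sketch of the slave-sg flow from the client side, assuming
 * the channel was already configured via DMA_SLAVE_CONFIG and "sgl"
 * (sg_len entries) has been mapped with dma_map_sg(); "xfer_done" is a
 * hypothetical completion handler:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *				DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	txd->callback = xfer_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
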
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");