/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#include "dmaengine.h"

#define NR_DEFAULT_DESC	16
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
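
/*
 * Run the completion callback of every descriptor on @list, then
 * return all of them to the parent DMAC's free descriptor pool.
 */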
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
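
/*
 * For a cyclic channel: run the period callbacks, mark the descriptors
 * PREP again and splice them back onto the channel's work_list so the
 * tasklet resubmits them to the PL330 core.
 */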
static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}
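
/*
 * Try to hand the next prepared descriptor on the work_list to the
 * PL330 core. Gives up at the first descriptor that is already BUSY,
 * after a successful submit, or when the core's request queue is full;
 * a rejected request is marked DONE and the tasklet is scheduled to
 * retire it.
 */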
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
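
/*
 * Per-channel tasklet: collect DONE descriptors off the work_list,
 * complete their cookies, top up the PL330 request queue, make sure
 * the channel thread is running, then recycle (cyclic) or free the
 * completed descriptors.
 */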
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}
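
/*
 * Transfer-done callback invoked by the PL330 core: mark the descriptor
 * DONE and let the channel tasklet retire it. An aborted descriptor has
 * no channel and is simply ignored.
 */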
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
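
/*
 * Filter callback for dma_request_channel(): matches a channel against
 * either a DT phandle/channel-id pair or the platform-data peripheral id.
 */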
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}
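
/*
 * Get a free descriptor for @pch: take one from the DMAC pool, growing
 * the pool by one (GFP_ATOMIC) if it is empty, and initialize it for
 * this channel.
 */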
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
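
/*
 * Prepare a cyclic (e.g. audio) transfer of one period between memory
 * and the peripheral FIFO. Completed periods are recycled onto the
 * work_list by handle_cyclic_desc_list().
 */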
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
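
/*
 * Build one descriptor per scatterlist entry, chain them on the first
 * descriptor's node list and return the txd of the last one, which is
 * what pl330_tx_submit() expects. On allocation failure everything
 * already built is returned to the pool.
 */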
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
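
/*
 * Probe: map the PL330 registers, take the "dma" clock and the IRQ, add
 * the PL330 core, create a default descriptor pool and the peripheral
 * channels, then register the DMAC with the dmaengine framework.
 */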
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err2;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err3;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err4;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err5;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err5;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err5:
	pl330_del(pi);
probe_err4:
	free_irq(irq, pi);
probe_err3:
#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
	return;
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");