/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#define NR_DEFAULT_DESC	16
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
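
/*
 * Descriptor lifecycle, as implemented below: pluck_desc() hands out a
 * descriptor from the DMAC pool and marks it PREP; fill_queue() submits
 * it to the PL330 core and marks it BUSY; the core's completion callback
 * (dma_pl330_rqcb) marks it DONE; pl330_tasklet() then either returns it
 * to the DMAC pool or, for cyclic transfers, marks it PREP again and
 * requeues it on the work_list for reloading.
 */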
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of descriptors yet to be transferred */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC,
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
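
/*
 * A minimal usage sketch, not part of this driver: on a non-DT platform,
 * a client would typically pass pl330_filter() to dma_request_channel()
 * along with the peripheral id it wants. PERI_ID below is a hypothetical
 * value that would come from the platform's peri_id table:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)PERI_ID);
 *	if (!chan)
 *		return -EBUSY;
 *
 * On DT platforms the filter instead matches the controller node and the
 * channel id encoded in the client's property, per the #ifdef above.
 */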
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}
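
/*
 * A minimal sketch of how a client would exercise DMA_SLAVE_CONFIG above,
 * assuming a hypothetical peripheral whose 4-byte wide TX FIFO lives at
 * FIFO_PHYS_ADDR. On this kernel the call goes through the channel's
 * device_control hook, which is pl330_control():
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = FIFO_PHYS_ADDR,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *
 *	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *			(unsigned long)&cfg);
 *
 * With a 4-byte width, __ffs(4) == 2, so burst_sz is stored as the log2
 * of the FIFO width.
 */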
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last descriptor of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
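
/*
 * A worked example of get_burst_len(), using hypothetical hardware
 * numbers: with a 64-bit data bus (8 bytes) and a 16-line data buffer,
 * burst_len starts at 8 * 16 = 128; for brst_size = 2 (4-byte beats)
 * that becomes 128 >> 2 = 32, clamped to the PL330 maximum of 16. The
 * loop then decrements until the burst evenly divides len: len = 4096
 * is divisible by 16 << 2 = 64 bytes, so 16 is kept.
 */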
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}
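
/*
 * A minimal usage sketch for the cyclic path above (typical for audio),
 * with hypothetical buffer parameters: a 64 KiB ring split into 4 KiB
 * periods, playing back to a device FIFO. On this kernel the prep hook
 * is reached through the channel's dma_device, and period_done_cb is a
 * hypothetical per-period completion callback:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_dma_addr,
 *			64 * 1024, 4 * 1024, DMA_MEM_TO_DEV);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = period_done_cb;
 *	cookie = desc->tx_submit(desc);
 *	dma_async_issue_pending(chan);
 */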
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
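
/*
 * A minimal sketch of driving the memcpy path above, assuming a channel
 * acquired with DMA_MEMCPY capability; dst and src are DMA addresses
 * obtained from dma_map_single() or a coherent allocation:
 *
 *	desc = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	cookie = desc->tx_submit(desc);
 *	dma_async_issue_pending(chan);
 */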
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
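
/*
 * A minimal sketch of a slave transfer using the hook above, assuming the
 * channel was already configured via DMA_SLAVE_CONFIG and sgl was mapped
 * with dma_map_sg(); xfer_done_cb is a hypothetical completion callback:
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = xfer_done_cb;
 *	cookie = desc->tx_submit(desc);
 *	dma_async_issue_pending(chan);
 */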
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");