/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

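/*
 * Lifecycle of a descriptor, as implemented below: FREE (sitting in the
 * DMAC's desc_pool) -> PREP (pluck_desc() hands it to a channel) ->
 * BUSY (fill_queue() submits it to the PL330 core) -> DONE
 * (dma_pl330_rqcb() marks the transfer finished) -> FREE again
 * (free_desc_list() returns it to the pool).
 */
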
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	dma_addr_t fifo_addr;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

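/*
 * One dma_pl330_desc thus carries three views of the same transfer: txd is
 * what the dmaengine client sees, px holds the src/dst/length programmed
 * into the PL330 core, and req/rqcfg carry the request type and channel
 * control settings handed to pl330_submit_req().
 */
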
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;

		callback = desc->txd.callback;
		param = desc->txd.callback_param;
		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid, &desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

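/*
 * Note on pl330_submit_req() return values as used by fill_queue(): 0 means
 * the core accepted the request and the descriptor goes BUSY; -EAGAIN means
 * the core's request queue is full or the DMAC is dying, so submission is
 * retried on a later tasklet run; any other error marks the descriptor DONE
 * immediately and schedules the tasklet to report it as a bad request.
 */
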
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	/* Kill the tasklet before taking the lock: tasklet_kill() may
	 * busy-wait for a running tasklet, and pl330_tasklet() itself
	 * takes pch->lock, so calling it under the lock could deadlock.
	 */
	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next,
				struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

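/*
 * Cookie arithmetic above: dma_cookie_t is a signed 32-bit value and valid
 * cookies are positive, so "if (++cookie < 0) cookie = 1;" restarts the
 * sequence at 1 when the counter wraps past INT_MAX. For example, a channel
 * at cookie 5 submitting a three-descriptor list assigns 6 and 7 to the
 * first two descriptors and 8 to the last, leaving chan->cookie at 8.
 */
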
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = pch->chan.chan_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached, we'd rather
	 * have the platform increase the MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

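/*
 * Worked example for get_burst_len(): with a 64-bit data bus and a 16-line
 * data buffer, burst_len starts at (64 / 8) * 16 = 128 bytes; for
 * brst_size = 2 (4-byte beats) that is 128 >> 2 = 32 beats, clipped to the
 * architectural maximum of 16. The final loop then decrements burst_len
 * until burst_len * 4 evenly divides len, so len = 4096 keeps 16 while
 * len = 36 drops to 9.
 */
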
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

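/*
 * Burst-size selection above, by example: on a 32-bit bus, len = 4096 gives
 * burst = 4 (4096 % 4 == 0), so brst_size = 2; len = 4094 fails the modulo
 * test at 4 but passes at 2, so brst_size = 1. brst_size is thus the log2
 * of the burst size in bytes, recovered by the second loop.
 */
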
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

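/*
 * Typical client flow against the callbacks above (a sketch, not code in
 * this driver): the client obtains a channel through the dmaengine core,
 * calls chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 * DMA_TO_DEVICE, flags) to get the last descriptor of the chain, submits
 * it with tx->tx_submit(tx) (pl330_tx_submit() above, which assigns
 * cookies and queues the whole chain), and finally calls
 * chan->device->device_issue_pending(chan) to kick pl330_tasklet().
 */
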
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		/* the region is already ioremapped, so unwind via err2 */
		goto probe_err2;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err2;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max_t(int, pdat ? pdat->nr_valid_peri : 0,
			pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

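/*
 * The id/mask pair above matches the AMBA peripheral ID of the PL330: the
 * low 20 bits encode part number 0x330 and designer code 0x41 (ARM), while
 * the masked-off bits carry the silicon revision, so every PL330 revision
 * binds to this driver.
 */
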
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

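/*
 * With CONFIG_PM_RUNTIME enabled, the "dma" clock is gated off whenever the
 * runtime PM core suspends the device and re-enabled on resume; without it,
 * both callbacks are NULL and the clock stays enabled from probe until
 * pl330_remove().
 */
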
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");