edma.c

/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG	16
#define EDMA_MAX_SLOTS	MAX_NR_SG
#define EDMA_DESCRIPTORS	16

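/*
 * An edma_desc carries one dmaengine transaction: virt-dma bookkeeping plus
 * a trailing array of PaRAM sets, one per SG segment, of which at most
 * MAX_NR_SG are resident in hardware slots at a time (tracked by
 * 'processed').
 */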
struct edma_desc {
	struct virt_dma_desc vdesc;
	struct list_head node;
	int absync;
	int pset_nr;
	int processed;
	struct edmacc_param pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan vchan;
	struct list_head node;
	struct edma_desc *edesc;
	struct edma_cc *ecc;
	int ch_num;
	bool alloced;
	int slot[EDMA_MAX_SLOTS];
	int missed;
	struct dma_slave_config cfg;
};

struct edma_cc {
	int ctlr;
	struct dma_device dma_slave;
	struct edma_chan slave_chans[EDMA_CHANS];
	int num_slave_chans;
	int dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If either we processed all psets or we're still not started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].opt,
			edesc->pset[j].src,
			edesc->pset[j].dst,
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].ccnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link to the next slot if this is not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot; this results in all future events being
	 * absorbed, and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr)
		edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);

	edma_resume(echan->ch_num);

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}
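
/*
 * Example: with MAX_NR_SG = 16, a 40-entry SG list is executed as chunks of
 * 16 + 16 + 8 PaRAM sets. Each chunk's last set has TCINTEN set (see
 * edma_prep_slave_sg()), so its completion interrupt re-enters
 * edma_execute() via edma_callback() to program the next chunk into the
 * same 16 slots.
 */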
static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after this function returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
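
/*
 * dmaengine device_control entry point: dispatches DMA_TERMINATE_ALL and
 * DMA_SLAVE_CONFIG requests to the helpers above.
 */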
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t dev_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int acnt, bcnt, ccnt, src, dst, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int i, nslots;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		dev_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dev_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "Failed to allocate slot\n");
				kfree(edesc);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		acnt = dev_width;

		/*
		 * If the maxburst is equal to the fifo width, use
		 * A-synced transfers. This allows for large contiguous
		 * buffer transfers using only one PaRAM set.
		 */
		if (burst == 1) {
			edesc->absync = false;
			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
			if (bcnt)
				ccnt++;
			else
				bcnt = SZ_64K - 1;
			cidx = acnt;
		} else {
			/*
			 * If maxburst is greater than the fifo address_width,
			 * use AB-synced transfers where A count is the fifo
			 * address_width and B count is the maxburst. In this
			 * case, we are limited to transfers of C count frames
			 * of (address_width * maxburst) where C count is
			 * limited to SZ_64K-1. This places an upper bound on
			 * the length of an SG segment that can be handled.
			 */
			edesc->absync = true;
			bcnt = burst;
			ccnt = sg_dma_len(sg) / (acnt * bcnt);
			if (ccnt > (SZ_64K - 1)) {
				dev_err(dev, "Exceeded max SG segment size\n");
				kfree(edesc);
				return NULL;
			}
			cidx = acnt * bcnt;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = dev_addr;
			src_bidx = acnt;
			src_cidx = cidx;
			dst_bidx = 0;
			dst_cidx = 0;
		} else {
			src = dev_addr;
			dst = sg_dma_address(sg);
			src_bidx = 0;
			src_cidx = 0;
			dst_bidx = acnt;
			dst_cidx = cidx;
		}

		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
		/* Configure A or AB synchronized transfers */
		if (edesc->absync)
			edesc->pset[i].opt |= SYNCDIM;

		/*
		 * If this is the last set in the current chunk of MAX_NR_SG
		 * transactions, enable interrupts so that the next chunk is
		 * processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;

		edesc->pset[i].src = src;
		edesc->pset[i].dst = dst;

		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;

		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
		edesc->pset[i].ccnt = ccnt;
		edesc->pset[i].link_bcntrld = 0xffffffff;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
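
/*
 * Example of the PaRAM arithmetic above (values assumed for illustration):
 * with dev_width = DMA_SLAVE_BUSWIDTH_4_BYTES and maxburst = 8, a 64 KiB
 * segment becomes an AB-synced set with acnt = 4, bcnt = 8,
 * ccnt = 65536 / (4 * 8) = 2048 and cidx = 32. With maxburst = 1 the same
 * segment is A-synced: 16384 four-byte elements, so ccnt = 16384 / 65535 = 0
 * and bcnt = 16384, then ccnt is bumped to 1 to cover the partial frame.
 */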
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	/* Pause the channel */
	edma_pause(echan->ch_num);

	switch (ch_status) {
	case DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edesc = echan->edesc;
		if (edesc) {
			if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
			}
			edma_execute(echan);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);
		break;
	case DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);
		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_info(dev, "allocated channel for %u:%u\n",
		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
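
/*
 * Approximate the byte count of a descriptor. For AB-synced descriptors this
 * sums acnt * bcnt * ccnt over every PaRAM set; for the A-synced case it
 * reconstructs the size from the first set's counts. The result is a
 * whole-descriptor estimate, not a live hardware position.
 */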
static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
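
/*
 * Typical client usage of edma_filter_fn, shown as a sketch: the channel id
 * and FIFO address below (ch, fifo_phys) are hypothetical, but the calls are
 * the standard dmaengine slave API. A peripheral driver requests a channel
 * by controller:channel id and then configures it:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned ch = EDMA_CTLR_CHAN(0, 20);	// hypothetical channel id
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
 *	if (chan) {
 *		struct dma_slave_config cfg = {
 *			.dst_addr = fifo_phys,	// hypothetical FIFO address
 *			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *			.dst_maxburst = 8,
 *		};
 *		dmaengine_slave_config(chan, &cfg);
 *		// then dmaengine_prep_slave_sg(), dmaengine_submit() and
 *		// dma_async_issue_pending() as with any dmaengine driver
 *	}
 */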
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
			goto out;
		}
		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");