/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
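
/*
 * Driver-private state: one omap_dmadev wraps the dma_device, and each
 * omap_chan wraps a virtual channel (see virt-dma.h).  The tasklet and
 * pending list let issue_pending() defer hardware programming to
 * softirq context.
 */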
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};
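
/* Element size in bytes, indexed by OMAP_DMA_DATA_TYPE_* */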
static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}
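
/*
 * Program and start one scatterlist entry: the memory side (EMIFF
 * port) post-increments through sg->addr, while the device side was
 * already set up by omap_dma_start_desc().  The entry transfers
 * es_bytes[d->es] * sg->en * sg->fn bytes.  Called with the channel's
 * vc.lock held.
 */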
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}
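
/*
 * Take the next queued virtual descriptor (if any), program the
 * device side of the channel with a constant address, and start the
 * first sg entry.  Called with the channel's vc.lock held.
 */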
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}
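
/*
 * Completion callback from the legacy OMAP DMA core, in interrupt
 * context.  For scatter-gather transfers, either advance to the next
 * sg entry or complete the descriptor and start the next one; for
 * cyclic transfers, just signal the period callback.
 */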
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
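
/* Size of one sg entry in elements; callers scale by es_bytes[]. */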
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
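
/*
 * Residue from a hardware position: sg entries before the one that
 * contains @addr contribute nothing, the containing entry contributes
 * the bytes from @addr to its end, and every later entry counts in
 * full.
 */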
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);

	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
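
/*
 * issue_pending only queues the channel on the device's pending list;
 * the actual hardware start is deferred to the omap_dma_sched()
 * tasklet.
 */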
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);

		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
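
/*
 * A cyclic transfer is described with a single sg entry: EN covers
 * one period and FN counts the periods in the buffer.  The channel is
 * linked to itself so the controller loops over the buffer until the
 * transfer is terminated, with the frame interrupt firing once per
 * period.
 */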
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);

		if (flags & DMA_PREP_INTERRUPT)
			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (dma_omap2plus()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
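
/*
 * The controller only supports 8/16/32-bit elements (see es_bytes[]),
 * so an 8-byte bus width cannot be programmed.
 */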
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_stop_dma(c->dma_ch);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_start_dma(c->dma_ch);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}
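
/*
 * Register one virtual channel per DMA request line; the real
 * hardware channel is only claimed in alloc_chan_resources(), once a
 * client binds to the channel.
 */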
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};
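
/*
 * Filter function for dma_request_channel(): matches a channel of
 * this driver whose request line equals *(unsigned *)param.
 */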
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
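
/*
 * A minimal sketch of client-side usage (not part of this file; the
 * request-line value would come from the client driver's platform
 * data):
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = ...;	// DMA request line for the peripheral
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */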

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");