/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	unsigned int			dmamode;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	struct imxdma_engine		*imxdma;
	unsigned int			channel;
	unsigned int			imxdma_channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

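/*
 * A channel is considered "cyclic" when the descriptor at the head of its
 * active list was prepared by imxdma_prep_dma_cyclic().  Cyclic descriptors
 * stay on ld_active and are never completed.
 */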
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

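/*
 * Callbacks registered with the dma-v1 core.  All three simply defer the
 * real work (completion handling, starting the next descriptor) to the
 * per-channel tasklet.
 */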
static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_progression(int channel, void *data,
		struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

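/*
 * Program the dma-v1 hardware channel from a software descriptor and enable
 * it.  Memcpy descriptors use a single linear transfer, slave_sg descriptors
 * use a scatterlist, and cyclic descriptors are set up like slave_sg plus a
 * progression handler (the looping scatterlist itself is built in
 * imxdma_prep_dma_cyclic()).
 */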
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
					   d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	case IMXDMA_DESC_CYCLIC:
		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
							imxdma_progression);
		if (ret < 0)
			return ret;
		/*
		 * We fall through here since cyclic transfer is the same as
		 * slave_sg adding a progression handler and a specific sg
		 * configuration which is done in 'imxdma_prep_dma_cyclic'.
		 */
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
					       d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
					       d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imx_dma_enable(imxdmac->imxdma_channel);
	return 0;
}

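/*
 * Tasklet: runs after the hardware signals completion of the descriptor at
 * the head of ld_active.  It fires the client callback, completes the cookie
 * (non-cyclic only), recycles the descriptor to ld_free and, if another
 * descriptor is waiting on ld_queue, starts it.
 */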
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/*
	 * If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and do not complete its cookie: it is reused for every period.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	dma_cookie_complete(&desc->desc);
	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

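/*
 * dmaengine device_control hook.  DMA_TERMINATE_ALL disables the hardware
 * channel and returns every descriptor to ld_free; DMA_SLAVE_CONFIG caches
 * the peripheral address, burst length and bus width and programs the
 * dma-v1 channel for a FIFO <-> linear-memory transfer.
 */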
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imx_dma_disable(imxdmac->imxdma_channel);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     mode | IMX_DMA_TYPE_FIFO,
					     IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
					     imxdmac->dma_request, 1);
		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
					imxdmac->watermark_level *
					imxdmac->word_size);
		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

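/*
 * dmaengine ->tx_submit hook: assign a cookie to the prepared descriptor and
 * hand it from ld_free to ld_queue so that imxdma_issue_pending() can later
 * move it to ld_active and start the transfer.
 */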
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	/* Move the descriptor chosen by the prep function onto the queue */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

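/*
 * Pre-allocate a fixed pool of reusable descriptors for this channel and
 * park them on ld_free; the prep functions hand them out from there.
 */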
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		/* kzalloc() already zeroed the embedded tx descriptor */
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imx_dma_disable(imxdmac->imxdma_channel);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

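/*
 * Prepare a slave scatter/gather transfer.  The caller must have issued a
 * DMA_SLAVE_CONFIG first so the peripheral address and bus width are known;
 * buffers not aligned to the configured bus width are rejected.
 */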
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i)
		dma_length += sg->length;

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

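/*
 * Prepare a cyclic (circular) transfer, typically used for audio.  The
 * buffer is split into equally sized periods described by an internally
 * allocated scatterlist whose last entry links back to the first, so the
 * hardware loops until the channel is terminated.
 */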
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
				   sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

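/*
 * Prepare a memory-to-memory copy.  Both ends are linear 32-bit accesses;
 * the required alignment is advertised to clients via dma_device.copy_align
 * in the probe function below.
 */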
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

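/*
 * Kick the channel: if nothing is running, take the first descriptor off
 * ld_queue, program the hardware and move it to ld_active.  Completion of
 * that descriptor (handled in the tasklet) chains the rest of the queue.
 */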
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

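/*
 * Probe: request one dma-v1 hardware channel per dmaengine channel, hook up
 * the interrupt/error handlers and the per-channel tasklet, then register
 * the resulting dma_device with the dmaengine core.
 */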
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
							DMA_PRIO_MEDIUM);
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
				       imxdma_irq_handler, imxdma_err_handler,
				       imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");