/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
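
/*
 * Illustrative sketch only (not part of this driver): a slave-DMA client is
 * expected to drive this engine through the generic dmaengine API, roughly:
 *
 *	chan = dma_request_channel(mask, filter_fn, &imx_dma_data);
 *	dmaengine_slave_config(chan, &slave_cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, dir, flags);
 *	desc->callback = done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * filter_fn, slave_cfg and done_fn above are placeholder names, not symbols
 * defined by this file.
 */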
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define DMA_MODE_READ		0
#define DMA_MODE_WRITE		1
#define DMA_MODE_MASK		1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: clients context data for callbacks
 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower or same as sum of SG mapped chunk sizes)
 * @sgcount: number of chunks to be read/written
 *
 * Structure is used for i.MX DMA processing. It would probably be good to
 * use @struct dma_struct in the future for external interfacing and keep
 * @struct imxdma_channel_internal only as an extension to it.
 */
struct imxdma_channel_internal {
	void *data;
	unsigned int dma_mode;
	struct scatterlist *sg;
	unsigned int resbytes;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	struct timer_list watchdog;

	int hw_chaining;
};

struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	unsigned int dmamode;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	struct imxdma_channel_internal internal;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
};

struct imxdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_device;
	struct imxdma_channel channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterlist *sg)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}

static int
imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
		       unsigned int dma_length, unsigned int dev_addr,
		       unsigned int dmamode)
{
	int channel = imxdmac->channel;

	imxdmac->internal.sg = NULL;
	imxdmac->internal.dma_mode = dmamode;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			 "dev_addr=0x%08x for read\n",
			 channel, __func__, (unsigned int)dma_address,
			 dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(dma_address, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			 "dev_addr=0x%08x for write\n",
			 channel, __func__, (unsigned int)dma_address,
			 dma_length, dev_addr);

		imx_dmav1_writel(dma_address, DMA_SAR(channel));
		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_to_device,
				 DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}
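
/*
 * imxdma_enable_hw - start the programmed transfer on this channel
 *
 * Clears and unmasks the channel interrupt, then sets CCR_CEN (and
 * CCR_ACRPT) to kick the transfer. On i.MX21/i.MX27 with hardware
 * chaining enabled, the next scatterlist entry (if any) is pre-programmed
 * so the controller can auto-repeat without software intervention.
 */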
static void imxdma_enable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (imxdmac->internal.in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) {
		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
		if (imxdmac->internal.sg) {
			u32 tmp;
			imxdma_sg_next(imxdmac, imxdmac->internal.sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
	imxdmac->internal.in_use = 1;

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdmac->internal.in_use = 0;
	local_irq_restore(flags);
}
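
/*
 * imxdma_config_channel_hw - build the CCR templates for both directions
 *
 * @config_port and @config_mem carry the IMX_DMA_MEMSIZE_* and
 * IMX_DMA_TYPE_* bits for the peripheral and memory side; they are combined
 * into ccr_from_device/ccr_to_device (memory-side bits shifted into the
 * destination/source size fields), CCR_REN is added when a request line is
 * used, and the request source is routed through DMA_RSSR.
 */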
static int
imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port,
	unsigned int config_mem, unsigned int dmareq, int hw_chaining)
{
	int channel = imxdmac->channel;
	u32 dreq = 0;

	imxdmac->internal.hw_chaining = 0;

	if (hw_chaining) {
		imxdmac->internal.hw_chaining = 1;

		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}

static int
imxdma_setup_sg_hw(struct imxdma_channel *imxdmac,
		   struct scatterlist *sg, unsigned int sgcount,
		   unsigned int dma_length, unsigned int dev_addr,
		   unsigned int dmamode)
{
	int channel = imxdmac->channel;

	if (imxdmac->internal.in_use)
		return -EBUSY;

	imxdmac->internal.sg = sg;
	imxdmac->internal.dma_mode = dmamode;
	imxdmac->internal.resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for read\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for write\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imxdma_sg_next(imxdmac, sg);

	return 0;
}

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));
	imxdmac->internal.in_use = 0;
	imxdmac->internal.sg = NULL;

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		internal = &imxdma->channel[i].internal;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
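
/*
 * dma_irq_handle_channel - per-channel completion handling
 *
 * Advances the emulated scatter-gather walk: if another sg entry remains it
 * is programmed (re-arming the watchdog when hardware chaining is used),
 * otherwise the channel is stopped and the tasklet is scheduled to complete
 * the active descriptor.
 */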
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;

	if (imxdma->sg) {
		u32 tmp;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			imxdma_sg_next(imxdmac, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		 disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			internal = &imxdma->channel[i].internal;
			dma_irq_handle_channel(&imxdma->channel[i]);
		}
	}

	return IRQ_HANDLED;
}

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imxdma_config_channel_hw(imxdmac,
					       d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imxdma_setup_single_hw(imxdmac, d->src,
					     d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;

	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
						 d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
						 d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(imxdmac);
	return 0;
}
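
/*
 * imxdma_tasklet - descriptor completion bottom half
 *
 * Runs the client callback and completes the cookie of the first descriptor
 * on ld_active. Unless the descriptor is cyclic (which stays active), it is
 * then moved to ld_free and the next descriptor waiting on ld_queue is
 * started.
 */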
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}
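
/*
 * imxdma_control - DMA_TERMINATE_ALL and DMA_SLAVE_CONFIG support
 *
 * For DMA_SLAVE_CONFIG the peripheral FIFO access size is derived from the
 * configured bus width, and the burst length register is programmed in
 * bytes (maxburst * word size).
 */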
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imxdma_config_channel_hw(imxdmac,
					       mode | IMX_DMA_TYPE_FIFO,
					       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
					       imxdmac->dma_request, 1);

		if (ret)
			return ret;

		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				 DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
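
/*
 * imxdma_prep_dma_cyclic - cyclic (e.g. audio) transfers
 *
 * The buffer is split into buf_len/period_len chunks; a scatterlist with one
 * entry per period is built and its terminating entry chains back to the
 * first, so the sg walk never ends (desc->len is set to IMX_DMA_LENGTH_LOOP).
 */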
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");