/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS 16
#define IMX_DMA_CHANNELS 16

#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32 (0 << 4)
#define IMX_DMA_MEMSIZE_8 (1 << 4)
#define IMX_DMA_MEMSIZE_16 (2 << 4)
#define IMX_DMA_TYPE_LINEAR (0 << 10)
#define IMX_DMA_TYPE_2D (1 << 10)
#define IMX_DMA_TYPE_FIFO (2 << 10)

#define IMX_DMA_ERR_BURST (1 << 0)
#define IMX_DMA_ERR_REQUEST (1 << 1)
#define IMX_DMA_ERR_TRANSFER (1 << 2)
#define IMX_DMA_ERR_BUFFER (1 << 3)
#define IMX_DMA_ERR_TIMEOUT (1 << 4)

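/*
 * v1 DMA controller register map: the global registers sit at the low
 * offsets; the per-channel registers below are banked every 0x40 bytes,
 * hence the ((x) << 6) term for channel x.
 */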
#define DMA_DCR 0x00 /* Control Register */
#define DMA_DISR 0x04 /* Interrupt status Register */
#define DMA_DIMR 0x08 /* Interrupt mask Register */
#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
#define DMA_DRTOSR 0x10 /* Request timeout Register */
#define DMA_DSESR 0x14 /* Transfer Error Status Register */
#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
#define DMA_WSRA 0x40 /* W-Size Register A */
#define DMA_XSRA 0x44 /* X-Size Register A */
#define DMA_YSRA 0x48 /* Y-Size Register A */
#define DMA_WSRB 0x4c /* W-Size Register B */
#define DMA_XSRB 0x50 /* X-Size Register B */
#define DMA_YSRB 0x54 /* Y-Size Register B */
#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */

#define DCR_DRST (1<<1)
#define DCR_DEN (1<<0)
#define DBTOCR_EN (1<<15)
#define DBTOCR_CNT(x) ((x) & 0x7fff)
#define CNTR_CNT(x) ((x) & 0xffffff)
#define CCR_ACRPT (1<<14)
#define CCR_DMOD_LINEAR (0x0 << 12)
#define CCR_DMOD_2D (0x1 << 12)
#define CCR_DMOD_FIFO (0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR (0x0 << 10)
#define CCR_SMOD_2D (0x1 << 10)
#define CCR_SMOD_FIFO (0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC (1<<9)
#define CCR_MSEL_B (1<<8)
#define CCR_DSIZ_32 (0x0 << 6)
#define CCR_DSIZ_8 (0x1 << 6)
#define CCR_DSIZ_16 (0x2 << 6)
#define CCR_SSIZ_32 (0x0 << 4)
#define CCR_SSIZ_8 (0x1 << 4)
#define CCR_SSIZ_16 (0x2 << 4)
#define CCR_REN (1<<3)
#define CCR_RPT (1<<2)
#define CCR_FRC (1<<1)
#define CCR_CEN (1<<0)
#define RTOR_EN (1<<15)
#define RTOR_CLK (1<<14)
#define RTOR_PSC (1<<13)

enum imxdma_prep_type {
        IMXDMA_DESC_MEMCPY,
        IMXDMA_DESC_INTERLEAVED,
        IMXDMA_DESC_SLAVE_SG,
        IMXDMA_DESC_CYCLIC,
};

/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: client's context data for callbacks
 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower or same as sum of SG mapped chunk sizes)
 * @sgcount: number of chunks to be read/written
 *
 * Structure is used for IMX DMA processing. It would probably be good to use
 * @struct dma_struct in the future for external interfacing and use
 * @struct imxdma_channel_internal only as extension to it.
 */
struct imxdma_channel_internal {
        struct timer_list watchdog;
        int hw_chaining;
};

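/*
 * One software descriptor per prepared transfer.  Descriptors are
 * pre-allocated in imxdma_alloc_chan_resources() and cycle through the
 * channel's ld_free -> ld_queue -> ld_active lists.
 */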
struct imxdma_desc {
        struct list_head node;
        struct dma_async_tx_descriptor desc;
        enum dma_status status;
        dma_addr_t src;
        dma_addr_t dest;
        size_t len;
        enum dma_transfer_direction direction;
        enum imxdma_prep_type type;
        /* For memcpy and interleaved */
        unsigned int config_port;
        unsigned int config_mem;
        /* For interleaved transfers */
        unsigned int x;
        unsigned int y;
        unsigned int w;
        /* For slave sg and cyclic */
        struct scatterlist *sg;
        unsigned int sgcount;
};

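/*
 * Per-channel state.  The descriptor lists are protected by @lock;
 * completion and error handling run in @dma_tasklet.
 */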
struct imxdma_channel {
        struct imxdma_channel_internal internal;
        struct imxdma_engine *imxdma;
        unsigned int channel;

        struct tasklet_struct dma_tasklet;
        struct list_head ld_free;
        struct list_head ld_queue;
        struct list_head ld_active;
        int descs_allocated;
        enum dma_slave_buswidth word_size;
        dma_addr_t per_address;
        u32 watermark_level;
        struct dma_chan chan;
        spinlock_t lock;
        struct dma_async_tx_descriptor desc;
        enum dma_status status;
        int dma_request;
        struct scatterlist *sg_list;
        u32 ccr_from_device;
        u32 ccr_to_device;
};

struct imxdma_engine {
        struct device *dev;
        struct device_dma_parameters dma_parms;
        struct dma_device dma_device;
        struct imxdma_channel channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
        struct imxdma_desc *desc;

        if (!list_empty(&imxdmac->ld_active)) {
                desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
                                        node);
                if (desc->type == IMXDMA_DESC_CYCLIC)
                        return true;
        }
        return false;
}

/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
        __raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
        return __raw_readl(imx_dmav1_baseaddr + offset);
}

static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
        if (cpu_is_mx27())
                return imxdma->hw_chaining;
        else
                return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        unsigned long now;

        now = min(d->len, sg->length);
        if (d->len != IMX_DMA_LENGTH_LOOP)
                d->len -= now;

        if (d->direction == DMA_DEV_TO_MEM)
                imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
        else
                imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

        imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

        pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
                 "size 0x%08x\n", imxdmac->channel,
                 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
                 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
                 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

        return now;
}

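/*
 * Clear and unmask the channel's interrupt and set CCR_CEN to start the
 * transfer.  On i.MX21/27 with hardware chaining enabled the following sg
 * chunk is programmed right away using CCR_RPT/CCR_ACRPT.
 */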
static void imxdma_enable_hw(struct imxdma_desc *d)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        int channel = imxdmac->channel;
        unsigned long flags;

        pr_debug("imxdma%d: imx_dma_enable\n", channel);

        local_irq_save(flags);

        imx_dmav1_writel(1 << channel, DMA_DISR);
        imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
        imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
                         CCR_ACRPT, DMA_CCR(channel));

        if ((cpu_is_mx21() || cpu_is_mx27()) &&
            d->sg && imxdma_hw_chain(&imxdmac->internal)) {
                d->sg = sg_next(d->sg);
                if (d->sg) {
                        u32 tmp;
                        imxdma_sg_next(d, d->sg);
                        tmp = imx_dmav1_readl(DMA_CCR(channel));
                        imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
                                         DMA_CCR(channel));
                }
        }

        local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
        int channel = imxdmac->channel;
        unsigned long flags;

        pr_debug("imxdma%d: imx_dma_disable\n", channel);

        if (imxdma_hw_chain(&imxdmac->internal))
                del_timer(&imxdmac->internal.watchdog);

        local_irq_save(flags);
        imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
        imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
                         DMA_CCR(channel));
        imx_dmav1_writel(1 << channel, DMA_DISR);
        local_irq_restore(flags);
}

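/*
 * Watchdog used when hardware chaining is active: if the chained transfer
 * stalls, stop the channel and let the tasklet handle the error.
 */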
static void imxdma_watchdog(unsigned long data)
{
        struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
        int channel = imxdmac->channel;

        imx_dmav1_writel(0, DMA_CCR(channel));

        /* Tasklet watchdog error handler */
        tasklet_schedule(&imxdmac->dma_tasklet);
        pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
        struct imxdma_engine *imxdma = dev_id;
        struct imxdma_channel_internal *internal;
        unsigned int err_mask;
        int i, disr;
        int errcode;

        disr = imx_dmav1_readl(DMA_DISR);

        err_mask = imx_dmav1_readl(DMA_DBTOSR) |
                   imx_dmav1_readl(DMA_DRTOSR) |
                   imx_dmav1_readl(DMA_DSESR) |
                   imx_dmav1_readl(DMA_DBOSR);

        if (!err_mask)
                return IRQ_HANDLED;

        imx_dmav1_writel(disr & err_mask, DMA_DISR);

        for (i = 0; i < IMX_DMA_CHANNELS; i++) {
                if (!(err_mask & (1 << i)))
                        continue;
                internal = &imxdma->channel[i].internal;
                errcode = 0;

                if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DBTOSR);
                        errcode |= IMX_DMA_ERR_BURST;
                }
                if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DRTOSR);
                        errcode |= IMX_DMA_ERR_REQUEST;
                }
                if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DSESR);
                        errcode |= IMX_DMA_ERR_TRANSFER;
                }
                if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DBOSR);
                        errcode |= IMX_DMA_ERR_BUFFER;
                }
                /* Tasklet error handler */
                tasklet_schedule(&imxdma->channel[i].dma_tasklet);

                printk(KERN_WARNING
                       "DMA timeout on channel %d -%s%s%s%s\n", i,
                       errcode & IMX_DMA_ERR_BURST ? " burst" : "",
                       errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
                       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
                       errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
        }
        return IRQ_HANDLED;
}

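/*
 * Per-channel interrupt handling: advance to the next sg chunk if there is
 * one (re-arming the hardware-chaining watchdog when chaining is used),
 * otherwise stop the channel and let the tasklet complete the descriptor.
 */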
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
        struct imxdma_channel_internal *imxdma = &imxdmac->internal;
        int chno = imxdmac->channel;
        struct imxdma_desc *desc;

        spin_lock(&imxdmac->lock);
        if (list_empty(&imxdmac->ld_active)) {
                spin_unlock(&imxdmac->lock);
                goto out;
        }

        desc = list_first_entry(&imxdmac->ld_active,
                                struct imxdma_desc,
                                node);
        spin_unlock(&imxdmac->lock);

        if (desc->sg) {
                u32 tmp;
                desc->sg = sg_next(desc->sg);

                if (desc->sg) {
                        imxdma_sg_next(desc, desc->sg);

                        tmp = imx_dmav1_readl(DMA_CCR(chno));

                        if (imxdma_hw_chain(imxdma)) {
                                /* FIXME: The timeout should probably be
                                 * configurable
                                 */
                                mod_timer(&imxdma->watchdog,
                                          jiffies + msecs_to_jiffies(500));

                                tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
                                imx_dmav1_writel(tmp, DMA_CCR(chno));
                        } else {
                                imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
                                tmp |= CCR_CEN;
                        }

                        imx_dmav1_writel(tmp, DMA_CCR(chno));

                        if (imxdma_chan_is_doing_cyclic(imxdmac))
                                /* Tasklet progression */
                                tasklet_schedule(&imxdmac->dma_tasklet);

                        return;
                }

                if (imxdma_hw_chain(imxdma)) {
                        del_timer(&imxdma->watchdog);
                        return;
                }
        }

out:
        imx_dmav1_writel(0, DMA_CCR(chno));
        /* Tasklet irq */
        tasklet_schedule(&imxdmac->dma_tasklet);
}

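/*
 * Main DMA interrupt: on i.MX21/27 the error conditions share this line,
 * so the error handler runs first; then every channel flagged in DISR is
 * serviced.
 */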
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
        struct imxdma_engine *imxdma = dev_id;
        struct imxdma_channel_internal *internal;
        int i, disr;

        if (cpu_is_mx21() || cpu_is_mx27())
                imxdma_err_handler(irq, dev_id);

        disr = imx_dmav1_readl(DMA_DISR);

        pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
                 disr);

        imx_dmav1_writel(disr, DMA_DISR);
        for (i = 0; i < IMX_DMA_CHANNELS; i++) {
                if (disr & (1 << i)) {
                        internal = &imxdma->channel[i].internal;
                        dma_irq_handle_channel(&imxdma->channel[i]);
                }
        }

        return IRQ_HANDLED;
}

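/*
 * Program the channel registers for one descriptor and enable the channel.
 * Called with the channel otherwise idle, either from issue_pending or from
 * the tasklet once the previous descriptor has completed.
 */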
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;

        /* Configure and enable */
        switch (d->type) {
        case IMXDMA_DESC_MEMCPY:
                imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
                imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
                imx_dmav1_writel(d->config_mem | (d->config_port << 2),
                                 DMA_CCR(imxdmac->channel));
                imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));

                dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
                        "dma_length=%d\n", __func__, imxdmac->channel,
                        d->dest, d->src, d->len);
                break;
        /* Cyclic transfer is the same as slave_sg with special sg configuration. */
        case IMXDMA_DESC_CYCLIC:
        case IMXDMA_DESC_SLAVE_SG:
                if (d->direction == DMA_DEV_TO_MEM) {
                        imx_dmav1_writel(imxdmac->per_address,
                                         DMA_SAR(imxdmac->channel));
                        imx_dmav1_writel(imxdmac->ccr_from_device,
                                         DMA_CCR(imxdmac->channel));

                        dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
                                "total length=%d dev_addr=0x%08x (dev2mem)\n",
                                __func__, imxdmac->channel, d->sg, d->sgcount,
                                d->len, imxdmac->per_address);
                } else if (d->direction == DMA_MEM_TO_DEV) {
                        imx_dmav1_writel(imxdmac->per_address,
                                         DMA_DAR(imxdmac->channel));
                        imx_dmav1_writel(imxdmac->ccr_to_device,
                                         DMA_CCR(imxdmac->channel));

                        dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
                                "total length=%d dev_addr=0x%08x (mem2dev)\n",
                                __func__, imxdmac->channel, d->sg, d->sgcount,
                                d->len, imxdmac->per_address);
                } else {
                        dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
                                __func__, imxdmac->channel);
                        return -EINVAL;
                }

                imxdma_sg_next(d, d->sg);

                break;
        default:
                return -EINVAL;
        }
        imxdma_enable_hw(d);
        return 0;
}

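/*
 * Deferred completion work: run the client callback for the finished
 * descriptor, complete its cookie and, unless it is cyclic, move it back to
 * ld_free and start the next queued descriptor.
 */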
static void imxdma_tasklet(unsigned long data)
{
        struct imxdma_channel *imxdmac = (void *)data;
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;

        spin_lock(&imxdmac->lock);

        if (list_empty(&imxdmac->ld_active)) {
                /* Someone might have called terminate all */
                goto out;
        }
        desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

        if (desc->desc.callback)
                desc->desc.callback(desc->desc.callback_param);

        dma_cookie_complete(&desc->desc);

        /* If we are dealing with a cyclic descriptor keep it on ld_active */
        if (imxdma_chan_is_doing_cyclic(imxdmac))
                goto out;

        list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

        if (!list_empty(&imxdmac->ld_queue)) {
                desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
                                        node);
                list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
                if (imxdma_xfer_desc(desc) < 0)
                        dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
                                 __func__, imxdmac->channel);
        }
out:
        spin_unlock(&imxdmac->lock);
}

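/*
 * dmaengine device_control hook: DMA_TERMINATE_ALL stops the hardware and
 * recycles all descriptors; DMA_SLAVE_CONFIG derives the CCR templates,
 * request line and burst length from the client's slave configuration.
 */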
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                          unsigned long arg)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct dma_slave_config *dmaengine_cfg = (void *)arg;
        unsigned long flags;
        unsigned int mode = 0;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                imxdma_disable_hw(imxdmac);

                spin_lock_irqsave(&imxdmac->lock, flags);
                list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
                list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
                spin_unlock_irqrestore(&imxdmac->lock, flags);
                return 0;
        case DMA_SLAVE_CONFIG:
                if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        imxdmac->per_address = dmaengine_cfg->src_addr;
                        imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        imxdmac->word_size = dmaengine_cfg->src_addr_width;
                } else {
                        imxdmac->per_address = dmaengine_cfg->dst_addr;
                        imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
                        imxdmac->word_size = dmaengine_cfg->dst_addr_width;
                }

                switch (imxdmac->word_size) {
                case DMA_SLAVE_BUSWIDTH_1_BYTE:
                        mode = IMX_DMA_MEMSIZE_8;
                        break;
                case DMA_SLAVE_BUSWIDTH_2_BYTES:
                        mode = IMX_DMA_MEMSIZE_16;
                        break;
                default:
                case DMA_SLAVE_BUSWIDTH_4_BYTES:
                        mode = IMX_DMA_MEMSIZE_32;
                        break;
                }

                imxdmac->internal.hw_chaining = 1;
                if (!imxdma_hw_chain(&imxdmac->internal))
                        return -EINVAL;
                imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
                        ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
                        CCR_REN;
                imxdmac->ccr_to_device =
                        (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
                        ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
                imx_dmav1_writel(imxdmac->dma_request,
                                 DMA_RSSR(imxdmac->channel));

                /* Set burst length */
                imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
                                 DMA_BLR(imxdmac->channel));

                return 0;
        default:
                return -ENOSYS;
        }

        return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *txstate)
{
        return dma_cookie_status(chan, cookie, txstate);
}

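/*
 * tx_submit: assign the cookie and move the just-prepared descriptor from
 * ld_free onto the pending queue; the transfer itself is only started by
 * imxdma_issue_pending() or by the tasklet.
 */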
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);

        /* queue the prepared descriptor so issue_pending can find it */
        list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
        cookie = dma_cookie_assign(tx);

        spin_unlock_irqrestore(&imxdmac->lock, flags);

        return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imx_dma_data *data = chan->private;

        if (data != NULL)
                imxdmac->dma_request = data->dma_request;

        while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
                struct imxdma_desc *desc;

                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc)
                        break;
                memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
                dma_async_tx_descriptor_init(&desc->desc, chan);
                desc->desc.tx_submit = imxdma_tx_submit;
                /* txd.flags will be overwritten in prep funcs */
                desc->desc.flags = DMA_CTRL_ACK;
                desc->status = DMA_SUCCESS;

                list_add_tail(&desc->node, &imxdmac->ld_free);
                imxdmac->descs_allocated++;
        }

        if (!imxdmac->descs_allocated)
                return -ENOMEM;

        return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_desc *desc, *_desc;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);

        imxdma_disable_hw(imxdmac);
        list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
        list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

        spin_unlock_irqrestore(&imxdmac->lock, flags);

        list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
                kfree(desc);
                imxdmac->descs_allocated--;
        }
        INIT_LIST_HEAD(&imxdmac->ld_free);

        if (imxdmac->sg_list) {
                kfree(imxdmac->sg_list);
                imxdmac->sg_list = NULL;
        }
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct scatterlist *sg;
        int i, dma_length = 0;
        struct imxdma_desc *desc;

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        for_each_sg(sgl, sg, sg_len, i) {
                dma_length += sg->length;
        }

        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sgl->length & 3 || sgl->dma_address & 3)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                if (sgl->length & 1 || sgl->dma_address & 1)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                break;
        default:
                return NULL;
        }

        desc->type = IMXDMA_DESC_SLAVE_SG;
        desc->sg = sgl;
        desc->sgcount = sg_len;
        desc->len = dma_length;
        desc->direction = direction;
        if (direction == DMA_DEV_TO_MEM) {
                desc->src = imxdmac->per_address;
        } else {
                desc->dest = imxdmac->per_address;
        }
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

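/*
 * Cyclic transfers are emulated with a scatterlist of period-sized chunks;
 * an extra trailing entry chains back to the first one so imxdma_sg_next()
 * keeps wrapping around the buffer, and desc->len is set to
 * IMX_DMA_LENGTH_LOOP so the residue never runs out.
 */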
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                void *context)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
        int i;
        unsigned int periods = buf_len / period_len;

        dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
                __func__, imxdmac->channel, buf_len, period_len);

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        if (imxdmac->sg_list)
                kfree(imxdmac->sg_list);

        imxdmac->sg_list = kcalloc(periods + 1,
                                   sizeof(struct scatterlist), GFP_KERNEL);
        if (!imxdmac->sg_list)
                return NULL;

        sg_init_table(imxdmac->sg_list, periods);

        for (i = 0; i < periods; i++) {
                imxdmac->sg_list[i].page_link = 0;
                imxdmac->sg_list[i].offset = 0;
                imxdmac->sg_list[i].dma_address = dma_addr;
                imxdmac->sg_list[i].length = period_len;
                dma_addr += period_len;
        }

        /* close the loop */
        imxdmac->sg_list[periods].offset = 0;
        imxdmac->sg_list[periods].length = 0;
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

        desc->type = IMXDMA_DESC_CYCLIC;
        desc->sg = imxdmac->sg_list;
        desc->sgcount = periods;
        desc->len = IMX_DMA_LENGTH_LOOP;
        desc->direction = direction;
        if (direction == DMA_DEV_TO_MEM) {
                desc->src = imxdmac->per_address;
        } else {
                desc->dest = imxdmac->per_address;
        }
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
                struct dma_chan *chan, dma_addr_t dest,
                dma_addr_t src, size_t len, unsigned long flags)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;

        dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
                __func__, imxdmac->channel, src, dest, len);

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        desc->type = IMXDMA_DESC_MEMCPY;
        desc->src = src;
        desc->dest = dest;
        desc->len = len;
        desc->direction = DMA_MEM_TO_MEM;
        desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
        desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

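/*
 * Start the first queued descriptor if the channel is currently idle;
 * descriptors submitted while a transfer is running are picked up by the
 * tasklet when the active one completes.
 */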
static void imxdma_issue_pending(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);
        if (list_empty(&imxdmac->ld_active) &&
            !list_empty(&imxdmac->ld_queue)) {
                desc = list_first_entry(&imxdmac->ld_queue,
                                        struct imxdma_desc, node);

                if (imxdma_xfer_desc(desc) < 0) {
                        dev_warn(imxdma->dev,
                                 "%s: channel: %d couldn't issue DMA xfer\n",
                                 __func__, imxdmac->channel);
                } else {
                        list_move_tail(imxdmac->ld_queue.next,
                                       &imxdmac->ld_active);
                }
        }
        spin_unlock_irqrestore(&imxdmac->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma;
        int ret, i;

        if (cpu_is_mx1())
                imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
        else if (cpu_is_mx21())
                imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
        else if (cpu_is_mx27())
                imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
        else
                return 0;

        dma_clk = clk_get(NULL, "dma");
        if (IS_ERR(dma_clk))
                return PTR_ERR(dma_clk);
        clk_enable(dma_clk);

        /* reset DMA module */
        imx_dmav1_writel(DCR_DRST, DMA_DCR);

        if (cpu_is_mx1()) {
                ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
                if (ret) {
                        pr_crit("Can't register IRQ for DMA\n");
                        return ret;
                }

                ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
                if (ret) {
                        pr_crit("Can't register ERRIRQ for DMA\n");
                        free_irq(MX1_DMA_INT, NULL);
                        return ret;
                }
        }

        /* enable DMA module */
        imx_dmav1_writel(DCR_DEN, DMA_DCR);

        /* clear all interrupts */
        imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

        /* disable interrupts */
        imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

        imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
        if (!imxdma)
                return -ENOMEM;

        INIT_LIST_HEAD(&imxdma->dma_device.channels);

        dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

        /* Initialize channel parameters */
        for (i = 0; i < IMX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];

                memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
                if (cpu_is_mx21() || cpu_is_mx27()) {
                        ret = request_irq(MX2x_INT_DMACH0 + i,
                                          dma_irq_handler, 0, "DMA", imxdma);
                        if (ret) {
                                pr_crit("Can't register IRQ %d for DMA channel %d\n",
                                        MX2x_INT_DMACH0 + i, i);
                                goto err_init;
                        }
                        init_timer(&imxdmac->internal.watchdog);
                        imxdmac->internal.watchdog.function = &imxdma_watchdog;
                        imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
                }

                imxdmac->imxdma = imxdma;
                spin_lock_init(&imxdmac->lock);

                INIT_LIST_HEAD(&imxdmac->ld_queue);
                INIT_LIST_HEAD(&imxdmac->ld_free);
                INIT_LIST_HEAD(&imxdmac->ld_active);

                tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
                             (unsigned long)imxdmac);
                imxdmac->chan.device = &imxdma->dma_device;
                dma_cookie_init(&imxdmac->chan);
                imxdmac->channel = i;

                /* Add the channel to the DMAC list */
                list_add_tail(&imxdmac->chan.device_node,
                              &imxdma->dma_device.channels);
        }

        imxdma->dev = &pdev->dev;
        imxdma->dma_device.dev = &pdev->dev;

        imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
        imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
        imxdma->dma_device.device_tx_status = imxdma_tx_status;
        imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
        imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
        imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
        imxdma->dma_device.device_control = imxdma_control;
        imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

        platform_set_drvdata(pdev, imxdma);

        imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
        imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
        dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

        ret = dma_async_device_register(&imxdma->dma_device);
        if (ret) {
                dev_err(&pdev->dev, "unable to register\n");
                goto err_init;
        }

        return 0;

err_init:
        if (cpu_is_mx21() || cpu_is_mx27()) {
                while (--i >= 0)
                        free_irq(MX2x_INT_DMACH0 + i, NULL);
        } else if (cpu_is_mx1()) {
                free_irq(MX1_DMA_INT, NULL);
                free_irq(MX1_DMA_ERR, NULL);
        }

        kfree(imxdma);
        return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
        int i;

        dma_async_device_unregister(&imxdma->dma_device);

        if (cpu_is_mx21() || cpu_is_mx27()) {
                for (i = 0; i < IMX_DMA_CHANNELS; i++)
                        free_irq(MX2x_INT_DMACH0 + i, NULL);
        } else if (cpu_is_mx1()) {
                free_irq(MX1_DMA_INT, NULL);
                free_irq(MX1_DMA_ERR, NULL);
        }

        kfree(imxdma);

        return 0;
}

static struct platform_driver imxdma_driver = {
        .driver = {
                .name = "imx-dma",
        },
        .remove = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
        return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");