/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_LENGTH_LOOP		((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32		(0 << 4)
#define IMX_DMA_MEMSIZE_8		(1 << 4)
#define IMX_DMA_MEMSIZE_16		(2 << 4)
#define IMX_DMA_TYPE_LINEAR		(0 << 10)
#define IMX_DMA_TYPE_2D			(1 << 10)
#define IMX_DMA_TYPE_FIFO		(2 << 10)

#define IMX_DMA_ERR_BURST		(1 << 0)
#define IMX_DMA_ERR_REQUEST		(1 << 1)
#define IMX_DMA_ERR_TRANSFER		(1 << 2)
#define IMX_DMA_ERR_BUFFER		(1 << 3)
#define IMX_DMA_ERR_TIMEOUT		(1 << 4)

#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST		(1<<1)
#define DCR_DEN			(1<<0)
#define DBTOCR_EN		(1<<15)
#define DBTOCR_CNT(x)		((x) & 0x7fff)
#define CNTR_CNT(x)		((x) & 0xffffff)
#define CCR_ACRPT		(1<<14)
#define CCR_DMOD_LINEAR		(0x0 << 12)
#define CCR_DMOD_2D		(0x1 << 12)
#define CCR_DMOD_FIFO		(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR		(0x0 << 10)
#define CCR_SMOD_2D		(0x1 << 10)
#define CCR_SMOD_FIFO		(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC		(1<<9)
#define CCR_MSEL_B		(1<<8)
#define CCR_DSIZ_32		(0x0 << 6)
#define CCR_DSIZ_8		(0x1 << 6)
#define CCR_DSIZ_16		(0x2 << 6)
#define CCR_SSIZ_32		(0x0 << 4)
#define CCR_SSIZ_8		(0x1 << 4)
#define CCR_SSIZ_16		(0x2 << 4)
#define CCR_REN			(1<<3)
#define CCR_RPT			(1<<2)
#define CCR_FRC			(1<<1)
#define CCR_CEN			(1<<0)
#define RTOR_EN			(1<<15)
#define RTOR_CLK		(1<<14)
#define RTOR_PSC		(1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	if (cpu_is_mx27())
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg->length);
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}

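/*
 * imxdma_enable_hw - kick the channel: acknowledge and unmask its interrupt,
 * set CCR_CEN and, on i.MX21/27 with hardware chaining, pre-program the next
 * sg chunk so the controller can auto-repeat into it.
 */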
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

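/*
 * imxdma_disable_hw - stop the channel: mask its interrupt, clear CCR_CEN
 * and acknowledge any interrupt still pending for it.
 */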
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

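/*
 * imxdma_watchdog - timer callback used with hardware chaining: a timeout
 * means the channel got stuck, so its CCR is cleared and the channel tasklet
 * is scheduled to clean up.
 */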
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}

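/*
 * imxdma_err_handler - error interrupt handler: collect the per-channel
 * burst/request/transfer/buffer error status bits, acknowledge them and let
 * the affected channels' tasklets finish their descriptors.
 */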
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

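/*
 * dma_irq_handle_channel - per-channel completion handling: advance to the
 * next sg chunk (re-arming the watchdog when hardware chaining is in use) or,
 * once the list is exhausted, stop the channel and schedule the tasklet.
 */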
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdmac->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdmac->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		     disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

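/*
 * imxdma_xfer_desc - program the channel registers for one descriptor
 * (memcpy, slave_sg or cyclic) and start the transfer.
 */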
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);
		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

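/*
 * imxdma_tasklet - completion bottom half: run the client callback, complete
 * the cookie, recycle the descriptor (unless it is cyclic) and start the
 * next queued descriptor, if any.
 */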
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

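/*
 * imxdma_control - dmaengine device_control hook: DMA_TERMINATE_ALL stops
 * the channel and recycles every descriptor; DMA_SLAVE_CONFIG programs the
 * peripheral address, bus width, burst length and request line.
 */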
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				 DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

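/*
 * imxdma_tx_submit - queue up a prepared descriptor and assign its cookie;
 * the hardware is only started later, from imxdma_issue_pending() or the
 * completion tasklet.
 */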
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	/* Move the descriptor just prepared from ld_free onto ld_queue */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

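/*
 * imxdma_alloc_chan_resources - take the request line from chan->private (if
 * provided) and pre-allocate a pool of descriptors on ld_free.
 */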
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

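/*
 * imxdma_prep_slave_sg - build a device<->memory scatter/gather descriptor.
 *
 * A slave client reaches this through the generic dmaengine API. Roughly,
 * as an illustrative sketch only (the config values and the "sgl"/"nents"
 * names below are the caller's, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst   = 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM, 0);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */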
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

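/*
 * imxdma_prep_dma_cyclic - build a cyclic (e.g. audio) descriptor: the buffer
 * is split into period-sized entries of a private sg list whose terminating
 * entry chains back to the first, so the transfer loops until terminated.
 */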
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

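/*
 * imxdma_prep_dma_memcpy - build a linear 32-bit memory-to-memory descriptor.
 */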
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

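/*
 * imxdma_issue_pending - if the channel is idle, start the first queued
 * descriptor and move it onto ld_active.
 */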
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

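/*
 * imxdma_probe - map the SoC-specific register base, enable the DMA clock,
 * reset and enable the controller, request the interrupt(s), initialise the
 * per-channel state and register the dmaengine device.
 */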
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");