/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
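
/*
 * Typical dmaengine client usage of this driver, as an illustrative
 * sketch only: error handling is omitted and all names prefixed "my_"
 * (filter function, FIFO address, completion callback, filter data)
 * are hypothetical placeholders supplied by the client driver.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= my_fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &my_filter_data);
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM, 0);
 *	desc->callback = my_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */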
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
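
/*
 * v1 DMA controller register map. Global registers come first; the
 * per-channel registers below are spaced 0x40 apart ((x) << 6).
 */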
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
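
/* Transfer types this driver knows how to program */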
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
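
/*
 * The engine provides two shared 2D configuration slots (A and B).
 * Channels doing interleaved transfers with identical x/y/w geometry
 * can share a slot; "count" tracks how many channels currently use it.
 */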
struct imx_dma_2d_config {
	u16	xsr;
	u16	ysr;
	u16	wsr;
	int	count;
};
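
/* Software descriptor: wraps the dmaengine descriptor with driver state */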
struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
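
/*
 * Per-channel state. Descriptors move between three lists:
 * ld_free (available) -> ld_queue (submitted) -> ld_active (running),
 * and back to ld_free on completion or termination.
 */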
struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
};
static struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx21_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX21_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}
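
/*
 * Start the channel: ack and unmask its interrupt, then set CCR_CEN.
 * With hardware chaining (i.MX27 only) the next sg chunk is programmed
 * up front and CCR_RPT|CCR_ACRPT make the controller auto-repeat.
 */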
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
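
/* Stop the channel: mask its interrupt, clear CCR_CEN, ack pending status. */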
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
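
/*
 * Watchdog for hardware-chained transfers: if no completion interrupt
 * arrives in time, force the channel off and defer error handling to
 * the tasklet.
 */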
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
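
/*
 * Error interrupt: decode the four per-channel error status registers
 * (burst timeout, request timeout, transfer error, buffer overflow),
 * ack each source and kick the affected channel's tasklet.
 */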
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ? " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}
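
/*
 * Per-channel completion: advance to the next sg chunk if one remains
 * (re-arming the hardware chain and its watchdog, or restarting the
 * channel by hand), otherwise stop the channel and let the tasklet
 * complete the descriptor.
 */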
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdma->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
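
/*
 * Shared completion interrupt. i.MX1 has a separate error interrupt;
 * on i.MX21/27 errors arrive here too, so run the error handler first.
 */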
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
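
/*
 * Program the hardware for one descriptor and enable the channel.
 * Interleaved transfers first claim one of the two shared 2D slots,
 * then fall through to the memcpy setup.
 */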
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		spin_lock_irqsave(&imxdma->lock, flags);
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0) {
			spin_unlock_irqrestore(&imxdma->lock, flags);
			return -EBUSY;
		}

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;
		spin_unlock_irqrestore(&imxdma->lock, flags);

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%zu\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;

	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%zu dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%zu dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
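
/*
 * Completion bottom half: run the client callback, complete the cookie
 * (unless the descriptor is cyclic), release any 2D slot, recycle the
 * descriptor and start the next queued one.
 */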
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdma->lock);
}
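
/*
 * DMA_TERMINATE_ALL stops the hardware and recycles all descriptors.
 * DMA_SLAVE_CONFIG captures address/width/burst and precomputes the
 * CCR values for both directions. Note that imxdma_hw_chain() only
 * returns true on i.MX27, so the chaining check below rejects slave
 * configuration on the other variants.
 */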
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				 imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
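
/* Move the prepared descriptor from ld_free to ld_queue and assign a cookie. */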
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}
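
/* Preallocate a fixed pool of software descriptors for the channel. */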
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
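
/* Tear down the channel: stop it and free every allocated descriptor. */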
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
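
/*
 * Prepare a slave scatter-gather transfer. The first sg entry must be
 * aligned to the configured bus width; the total length is the sum of
 * all entries.
 */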
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
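
/*
 * Prepare a cyclic transfer by building a circular sg list: one entry
 * per period, with the terminating entry chained back to the first so
 * the emulated scatter-gather never runs out.
 */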
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
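
/* Prepare a linear memory-to-memory copy (32-bit accesses on both sides). */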
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
		__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
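
/*
 * Prepare an interleaved (2D) memory-to-memory transfer. Only
 * single-frame templates are supported: x is the chunk size, y the
 * number of frames, and w the total row width (chunk plus gap).
 */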
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
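
/* Kick off the first queued descriptor if the channel is idle. */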
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}
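
/*
 * Probe: map registers, enable clocks, reset and enable the DMA module,
 * wire up interrupts (one shared IRQ plus a separate error IRQ on i.MX1,
 * one IRQ per channel otherwise) and register the dmaengine device.
 */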
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	/* set dev early so dev_warn() in the error paths below has a device */
	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!imxdma->base)
		return -EADDRNOTAVAIL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err;
		}

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto err;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto err;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto err;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	dma_async_device_unregister(&imxdma->dma_device);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.id_table	= imx_dma_devtype,
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");