fsldma.c
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"
static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
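
/* Kick off the transfer described by the current descriptor chain. If
 * external pause control is enabled, the byte count register is cleared and
 * the external master pause enable bit (EMP_EN) is set. If external start is
 * enabled, the transfer waits for the start pin (EMS_EN); otherwise the
 * channel start bit (CS) begins the transfer immediately.
 */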
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}
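
/* Abort the current transfer by setting the channel abort bit (CA), clear
 * the start bits, and then poll for up to 1ms (100 x 10us) for the channel
 * to report idle.
 */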
static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}
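
/* Mark a descriptor as the last link in the chain by setting the
 * End-of-links (EOL) bit in its next-link pointer; on 83xx parts the snoop
 * enable bit is kept set as well.
 */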
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the queue tail to the new descriptor's physical address and
	 * enable the End-of-segment interrupt on that last link descriptor
	 * (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}
/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA transfers
 * data from the source address (SA), if the loop size is 4, the DMA reads
 * data from SA, SA + 1, SA + 2, SA + 3, then loops back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}
/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), if the loop size is 4,
 * the DMA writes data to TA, TA + 1, TA + 2, TA + 3, then loops back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The pause control size is the number of bytes that may be transferred
 * before the channel pauses, after which a new assertion of DREQ# resumes
 * channel operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an external
 * DMA start pin, so dma_start() does not start the transfer immediately.
 * The DMA channel will wait for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
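
/* Assign a monotonically increasing cookie to every descriptor in the
 * transaction and append the descriptors to the channel's ld_queue. Cookies
 * wrap back to 1 on overflow, since non-positive values are reserved by the
 * dmaengine core for error reporting.
 */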
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	list_for_each_entry(desc, &tx->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		desc->async_tx.cookie = cookie;
	}

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}
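
/* Build a chain of hardware link descriptors for a memcpy operation. The
 * copy is split into segments of at most FSL_DMA_BCR_MAX_CNT bytes; each
 * segment gets its own descriptor, linked to the previous one. Only the
 * last descriptor carries the caller's flags and an uninitialized (-EBUSY)
 * cookie, and it is marked End-of-link.
 */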
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->async_tx.tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}
/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: completed link
 * descriptors are removed from the queue, their callbacks are run, and the
 * descriptors are returned to the DMA pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
 * fsl_chan_xfer_ld_queue - Transfer the link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred yet, we need to start them.
	 */
	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}
/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
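
/* Per-channel interrupt handler. The status register is read and written
 * back to clear the latched event bits; the handler then decodes the
 * remaining events, updating the completed cookie and/or restarting the
 * ld_queue as needed, and finally defers descriptor cleanup to the tasklet.
 */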
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* The BCR register is 0, so this is a DMA_INTERRUPT
			 * async_tx. Now, update the completed cookie, and
			 * continue with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer has finished,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For the MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}
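
/* Controller-level interrupt handler. Each channel's status occupies one
 * byte of the general status register, with channel 0 in the most
 * significant byte, so (32 - ffs(gsr)) / 8 converts the lowest set bit into
 * a channel index before dispatching to the per-channel handler.
 */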
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}
static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
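
	/* Derive the channel index from the register offset; the per-channel
	 * register blocks are assumed to start at offset 0x100 from the
	 * controller base and to be 0x80 bytes apart.
	 */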
	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
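		/* fall through: 85xx channels also get the 83xx callbacks */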
	case FSL_DMA_IP_83XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		 compatible,
		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}
static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}
static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}
static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};

static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");