/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/dma.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process,
 * but has not yet finished. This is a volatile value read directly from
 * hardware, so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
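
/*
 * Illustrative sketch (not part of the original driver): a caller that
 * wanted to busy-wait until the hardware has drained every queued command
 * could, in principle, poll this semaphore until it reads zero:
 *
 *	int sem;
 *
 *	do {
 *		sem = mxs_dma_read_semaphore(channel);
 *	} while (sem > 0);
 *
 *	if (sem < 0)
 *		return sem;	(channel was not valid/allocated)
 *
 * This driver instead waits on the per-channel command-complete IRQ bit in
 * HW_APBH_CTRL1 (see mxs_dma_wait_complete() below); the semaphore is only
 * consulted by mxs_dma_enable() and mxs_dma_finish() to keep the software
 * bookkeeping consistent with the hardware.
 */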

#ifndef CONFIG_SYS_DCACHE_OFF
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}

		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an
 * undefined state. It is unwise to call this function if there is ANY chance
 * the hardware is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore
 *     if the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one
 *     could construct a series of five DMA commands, with the
 *     DECREMENT_SEMAPHORE bit set only in the last one. Then, setting the DMA
 *     channel's hardware semaphore to one would cause the entire series of
 *     five commands to be processed. However, this example would violate the
 *     invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a
 *     command is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
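
/*
 * Illustrative sketch (not part of the original driver) of how a client
 * honors the rule above when it fills in a command before appending it. The
 * flag names are the descriptor flags from <asm/arch/dma.h> as used by the
 * existing MXS peripheral drivers; "length" is a placeholder:
 *
 *	desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ |
 *			 MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
 *			 MXS_DMA_DESC_WAIT4END |
 *			 (length << MXS_DMA_DESC_BYTES_OFFSET);
 *
 * With MXS_DMA_DESC_DEC_SEM set in every command, the hardware semaphore is
 * decremented once per completed command, which is exactly the invariant
 * that mxs_dma_enable() and mxs_dma_finish() rely on.
 */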

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
			      1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
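
/*
 * Illustrative sketch (not part of the original driver) of the intended call
 * sequence for a one-shot transfer on an already initialized channel; the
 * channel number, length, and buffer are placeholders:
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *	int ret;
 *
 *	if (!d)
 *		return -ENOMEM;
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |
 *		      MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
 *		      MXS_DMA_DESC_WAIT4END |
 *		      (length << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buffer;
 *
 *	mxs_dma_desc_append(channel, d);
 *	ret = mxs_dma_go(channel);
 *	mxs_dma_desc_free(d);
 *
 * mxs_dma_go() enables the channel, waits for completion, acknowledges the
 * IRQ, and finally resets and disables the channel, so the caller only has
 * to build and append the descriptor chain beforehand and free it afterwards.
 */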

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
		       channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
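
/*
 * Illustrative sketch (not part of the original driver) of bringing the
 * engine up at boot; the channel number is a placeholder and would typically
 * be one of the MXS_DMA_CHANNEL_AHB_APBH_* identifiers from
 * <asm/arch/dma.h>:
 *
 *	mxs_dma_init();
 *
 *	ret = mxs_dma_init_channel(channel);
 *	if (ret)
 *		return ret;
 *
 * After this, descriptors can be appended with mxs_dma_desc_append() and
 * executed with mxs_dma_go(); mxs_dma_release() returns the channel once it
 * is no longer busy.
 */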