/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)

#define APB_DMA_CNTRL 0x010
#define APB_DMA_IRQ_MASK 0x01c
#define APB_DMA_IRQ_MASK_SET 0x020

#define APB_DMA_CHAN_CSR 0x000
#define CSR_ENB (1<<31)
#define CSR_IE_EOC (1<<30)
#define CSR_HOLD (1<<29)
#define CSR_DIR (1<<28)
#define CSR_ONCE (1<<27)
#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC

#define APB_DMA_CHAN_STA 0x004
#define STA_BUSY (1<<31)
#define STA_ISE_EOC (1<<30)
#define STA_HALT (1<<29)
#define STA_PING_PONG (1<<28)
#define STA_COUNT_SHIFT 2
#define STA_COUNT_MASK 0xFFFC

#define APB_DMA_CHAN_AHB_PTR 0x010

#define APB_DMA_CHAN_AHB_SEQ 0x014
#define AHB_SEQ_INTR_ENB (1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT 28
#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP (1<<27)
#define AHB_SEQ_BURST_MASK (0x7<<24)
#define AHB_SEQ_BURST_1 (4<<24)
#define AHB_SEQ_BURST_4 (5<<24)
#define AHB_SEQ_BURST_8 (6<<24)
#define AHB_SEQ_DBL_BUF (1<<19)
#define AHB_SEQ_WRAP_SHIFT 16
#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR 0x018

#define APB_DMA_CHAN_APB_SEQ 0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT 28
#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP (1<<27)
#define APB_SEQ_WRAP_SHIFT 16
#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR 16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
#define TEGRA_SYSTEM_DMA_CH_MIN 0
#define TEGRA_SYSTEM_DMA_CH_MAX \
        (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000
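
/*
 * Lookup tables used by tegra_dma_update_hw() below: the index at which a
 * requested wrap size (in words) or bus width (in bits) is found is the value
 * programmed into the WRAP / BUS_WIDTH fields of the AHB/APB SEQ registers.
 * A request whose wrap or bus width is not listed here hits the BUG_ON() in
 * tegra_dma_update_hw().
 */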
const unsigned int ahb_addr_wrap_table[8] = {
        0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
        struct list_head list;
        int id;
        spinlock_t lock;
        char name[TEGRA_DMA_NAME_SIZE];
        void __iomem *addr;
        int mode;
        int irq;

        /* Register shadow */
        u32 csr;
        u32 ahb_seq;
        u32 ahb_ptr;
        u32 apb_seq;
        u32 apb_ptr;
};
#define NV_DMA_MAX_CHANNELS 32

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req);
static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;

        req = list_entry(ch->list.next, typeof(*req), node);

        tegra_dma_dequeue_req(ch, req);
        return;
}
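
/*
 * Stop sequence: first drop CSR_IE_EOC so a final EOC does not raise an
 * interrupt, then clear CSR_ENB to disable the channel, and finally
 * acknowledge any EOC status that is already pending by writing the status
 * register back to itself.
 */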
void tegra_dma_stop(struct tegra_dma_channel *ch)
{
        unsigned int csr;
        unsigned int status;

        csr = ch->csr;
        csr &= ~CSR_IE_EOC;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        csr &= ~CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC)
                writel(status, ch->addr + APB_DMA_CHAN_STA);
}
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
        unsigned int csr;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        while (!list_empty(&ch->list))
                list_del(ch->list.next);

        csr = ch->csr;
        csr &= ~CSR_REQ_SEL_MASK;
        csr |= CSR_REQ_SEL_INVALID;

        /* Set the enable as that is not shadowed */
        csr |= CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        tegra_dma_stop(ch);

        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
}
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        unsigned int csr;
        unsigned int status;
        struct tegra_dma_req *req = NULL;
        int found = 0;
        unsigned long irq_flags;
        int to_transfer;
        int req_transfer_count;

        spin_lock_irqsave(&ch->lock, irq_flags);
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        if (!found) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return 0;
        }

        /* STOP the DMA and get the transfer count.
         * Getting the transfer count is tricky.
         *  - Change the source selector to invalid to stop the DMA from
         *    FIFO to memory.
         *  - Read the status register to know the number of pending
         *    bytes to be transferred.
         *  - Finally stop or program the DMA to the next buffer in the
         *    list.
         */
        csr = ch->csr;
        csr &= ~CSR_REQ_SEL_MASK;
        csr |= CSR_REQ_SEL_INVALID;

        /* Set the enable as that is not shadowed */
        csr |= CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        /* Get the transfer count */
        status = readl(ch->addr + APB_DMA_CHAN_STA);
        to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
        req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
        req_transfer_count += 1;
        to_transfer += 1;

        req->bytes_transferred = req_transfer_count;

        if (status & STA_BUSY)
                req->bytes_transferred -= to_transfer;

        /* In continuous transfer mode, DMA only tracks the count of the
         * half DMA buffer. So, if the DMA already finished half the DMA
         * then add the half buffer to the completed count.
         *
         * FIXME: There can be a race here. What if the request to
         * dequeue happens at the same time as the DMA just moved to
         * the new buffer and SW hasn't yet received the interrupt?
         */
        if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
                        req->bytes_transferred += req_transfer_count;

        req->bytes_transferred *= 4;

        tegra_dma_stop(ch);
        if (!list_empty(&ch->list)) {
                /* if the list is not empty, queue the next request */
                struct tegra_dma_req *next_req;
                next_req = list_entry(ch->list.next,
                        typeof(*next_req), node);
                tegra_dma_update_hw(ch, next_req);
        }
        req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        /* Callback should be called without any lock */
        req->complete(req);
        return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
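
/*
 * Illustration of the accounting above (values are hypothetical): a one-shot
 * request of 4096 bytes programs WCOUNT with 1023, i.e. 1024 words.  If the
 * channel is still busy and the status register reports 200 words
 * outstanding, the request is credited with (1024 - 200) * 4 = 3296 bytes.
 * In continuous mode WCOUNT only covers half of the double buffer, hence the
 * extra req_transfer_count added when the first half had already completed.
 */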
bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
        unsigned long irq_flags;
        bool is_empty;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list))
                is_empty = true;
        else
                is_empty = false;
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        unsigned long irq_flags;
        struct tegra_dma_req *req;

        spin_lock_irqsave(&ch->lock, irq_flags);
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return true;
                }
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        unsigned long irq_flags;
        int start_dma = 0;

        if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
                req->source_addr & 0x3 || req->dest_addr & 0x3) {
                pr_err("Invalid DMA request for channel %d\n", ch->id);
                return -EINVAL;
        }

        spin_lock_irqsave(&ch->lock, irq_flags);

        req->bytes_transferred = 0;
        req->status = 0;
        req->buffer_status = 0;
        if (list_empty(&ch->list))
                start_dma = 1;

        list_add_tail(&req->node, &ch->list);

        if (start_dma)
                tegra_dma_update_hw(ch, req);

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
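
/*
 * Typical client usage (illustrative sketch only -- the tegra_dma_req fields
 * and request selectors are defined in <mach/dma.h>, and the addresses and
 * callback below are placeholders, not part of this driver):
 *
 *      struct tegra_dma_channel *ch;
 *      struct tegra_dma_req req;
 *
 *      ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *
 *      req.to_memory = 1;                peripheral FIFO to memory
 *      req.source_addr = fifo_bus_addr;  APB side
 *      req.dest_addr = buf_bus_addr;     AHB/memory side
 *      req.size = 4096;                  bytes, multiple of 4
 *      req.source_wrap = 4;              wrap on the 4-byte FIFO register
 *      req.dest_wrap = 0;                no wrap on the memory side
 *      req.source_bus_width = 32;
 *      req.dest_bus_width = 32;
 *      req.req_sel = ...;                request selector for the peripheral
 *      req.complete = my_complete;       called when the transfer finishes
 *
 *      tegra_dma_enqueue_req(ch, &req);  starts the DMA if the channel is idle
 *      ...
 *      tegra_dma_free_channel(ch);
 */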
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
        int channel;
        struct tegra_dma_channel *ch;

        /* first channel is the shared channel */
        if (mode & TEGRA_DMA_SHARED) {
                channel = TEGRA_SYSTEM_DMA_CH_MIN;
        } else {
                channel = find_first_zero_bit(channel_usage,
                        ARRAY_SIZE(dma_channels));
                if (channel >= ARRAY_SIZE(dma_channels))
                        return NULL;
        }
        __set_bit(channel, channel_usage);
        ch = &dma_channels[channel];
        ch->mode = mode;
        return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
        if (ch->mode & TEGRA_DMA_SHARED)
                return;
        tegra_dma_cancel(ch);
        __clear_bit(ch->id, channel_usage);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
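
/*
 * Note on TEGRA_DMA_SHARED: every shared allocation maps to channel
 * TEGRA_SYSTEM_DMA_CH_MIN, which is marked busy permanently in
 * tegra_dma_init(), so tegra_dma_free_channel() deliberately leaves it
 * allocated and does not cancel its queue.
 */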
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        if (req->to_memory) {
                ch->apb_ptr = req->source_addr;
                ch->ahb_ptr = req->dest_addr;
        } else {
                ch->apb_ptr = req->dest_addr;
                ch->ahb_ptr = req->source_addr;
        }
        writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;
        return;
}
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        int ahb_addr_wrap;
        int apb_addr_wrap;
        int ahb_bus_width;
        int apb_bus_width;
        int index;
        unsigned long csr;

        ch->csr |= CSR_FLOW;
        ch->csr &= ~CSR_REQ_SEL_MASK;
        ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
        ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
        ch->ahb_seq |= AHB_SEQ_BURST_1;

        /* One shot mode is always single buffered,
         * continuous mode is always double buffered
         */
        if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
                ch->csr |= CSR_ONCE;
                ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
                ch->csr &= ~CSR_WCOUNT_MASK;
                ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
        } else {
                ch->csr &= ~CSR_ONCE;
                ch->ahb_seq |= AHB_SEQ_DBL_BUF;
                /* In double buffered mode, we set the size to half the
                 * requested size and interrupt when half the buffer
                 * is full */
                ch->csr &= ~CSR_WCOUNT_MASK;
                ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
        }
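
        /*
         * Example of the WCOUNT programming above (illustrative): an 8192
         * byte continuous request is treated as two 4096 byte halves, so
         * WCOUNT is written with 8192/8 - 1 = 1023 (words per half) and an
         * EOC interrupt fires as each half completes; the same buffer in
         * one-shot mode would use 8192/4 - 1 = 2047.
         */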
        if (req->to_memory) {
                ch->csr &= ~CSR_DIR;
                ch->apb_ptr = req->source_addr;
                ch->ahb_ptr = req->dest_addr;

                apb_addr_wrap = req->source_wrap;
                ahb_addr_wrap = req->dest_wrap;
                apb_bus_width = req->source_bus_width;
                ahb_bus_width = req->dest_bus_width;
        } else {
                ch->csr |= CSR_DIR;
                ch->apb_ptr = req->dest_addr;
                ch->ahb_ptr = req->source_addr;

                apb_addr_wrap = req->dest_wrap;
                ahb_addr_wrap = req->source_wrap;
                apb_bus_width = req->dest_bus_width;
                ahb_bus_width = req->source_bus_width;
        }

        apb_addr_wrap >>= 2;
        ahb_addr_wrap >>= 2;

        /* set address wrap for APB size */
        index = 0;
        do {
                if (apb_addr_wrap_table[index] == apb_addr_wrap)
                        break;
                index++;
        } while (index < ARRAY_SIZE(apb_addr_wrap_table));
        BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
        ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
        ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;

        /* set address wrap for AHB size */
        index = 0;
        do {
                if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
                        break;
                index++;
        } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
        BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
        ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
        ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

        for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
                if (bus_width_table[index] == ahb_bus_width)
                        break;
        }
        BUG_ON(index == ARRAY_SIZE(bus_width_table));
        ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
        ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

        for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
                if (bus_width_table[index] == apb_bus_width)
                        break;
        }
        BUG_ON(index == ARRAY_SIZE(bus_width_table));
        ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
        ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

        ch->csr |= CSR_IE_EOC;

        /* update hw registers with the shadow */
        writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
        writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
        writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
        writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        csr = ch->csr | CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;
}
static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
{
        /* One shot with an interrupt to CPU after transfer */
        ch->csr = CSR_ONCE | CSR_IE_EOC;
        ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
        ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
}
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;

        spin_lock(&ch->lock);
        if (list_empty(&ch->list)) {
                spin_unlock(&ch->lock);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);
        if (req) {
                int bytes_transferred;

                bytes_transferred =
                        (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
                bytes_transferred += 1;
                bytes_transferred <<= 2;

                list_del(&req->node);
                req->bytes_transferred = bytes_transferred;
                req->status = TEGRA_DMA_REQ_SUCCESS;

                spin_unlock(&ch->lock);
                /* Callback should be called without any lock */
                pr_debug("%s: transferred %d bytes\n", __func__,
                        req->bytes_transferred);
                req->complete(req);
                spin_lock(&ch->lock);
        }

        if (!list_empty(&ch->list)) {
                req = list_entry(ch->list.next, typeof(*req), node);
                /* the complete function we just called may have enqueued
                 * another req, in which case dma has already started */
                if (req->status != TEGRA_DMA_REQ_INFLIGHT)
                        tegra_dma_update_hw(ch, req);
        }
        spin_unlock(&ch->lock);
}
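
/*
 * Continuous (double-buffered) requests generate two EOC interrupts per
 * request: the first moves buffer_status from EMPTY to HALF_FULL, loads the
 * pointers of the next queued request into the hardware (if any) and invokes
 * the req->threshold callback; the second marks the buffer FULL, removes the
 * request from the list and invokes req->complete.  Any other buffer_status
 * at interrupt time is a driver bug (BUG()).
 */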
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;

        spin_lock(&ch->lock);
        if (list_empty(&ch->list)) {
                spin_unlock(&ch->lock);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);
        if (req) {
                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
                        /* Load the next request into the hardware, if
                         * available */
                        if (!list_is_last(&req->node, &ch->list)) {
                                struct tegra_dma_req *next_req;

                                next_req = list_entry(req->node.next,
                                        typeof(*next_req), node);
                                tegra_dma_update_hw_partial(ch, next_req);
                        }
                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
                        req->status = TEGRA_DMA_REQ_SUCCESS;
                        /* DMA lock is NOT held when callback is called */
                        spin_unlock(&ch->lock);
                        if (likely(req->threshold))
                                req->threshold(req);
                        return;

                } else if (req->buffer_status ==
                                TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
                        /* Callback when the buffer is completely full (i.e. on
                         * the second interrupt) */
                        int bytes_transferred;

                        bytes_transferred =
                                (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
                        bytes_transferred += 1;
                        bytes_transferred <<= 3;

                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                        req->bytes_transferred = bytes_transferred;
                        req->status = TEGRA_DMA_REQ_SUCCESS;
                        list_del(&req->node);

                        /* DMA lock is NOT held when callback is called */
                        spin_unlock(&ch->lock);
                        req->complete(req);
                        return;

                } else {
                        BUG();
                }
        }
        spin_unlock(&ch->lock);
}
static irqreturn_t dma_isr(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;
        unsigned long status;

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC)
                writel(status, ch->addr + APB_DMA_CHAN_STA);
        else {
                pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
                return IRQ_HANDLED;
        }
        return IRQ_WAKE_THREAD;
}
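
/*
 * dma_isr() runs in hard interrupt context and only acknowledges the EOC
 * status; returning IRQ_WAKE_THREAD hands off to dma_thread_fn(), which walks
 * the request list and invokes the client callbacks from the IRQ thread.
 */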
static irqreturn_t dma_thread_fn(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;

        if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
                handle_oneshot_dma(ch);
        else
                handle_continuous_dma(ch);

        return IRQ_HANDLED;
}
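
/*
 * Channel partitioning set up below: of the TEGRA_SYSTEM_DMA_CH_NR (16)
 * hardware channels, the CPU owns TEGRA_SYSTEM_DMA_CH_MIN..
 * TEGRA_SYSTEM_DMA_CH_MAX (0..11 with the defaults above); the remaining
 * TEGRA_SYSTEM_DMA_AVP_CH_NUM channels are left marked busy, presumably for
 * the AVP coprocessor (per the define's name).  The IRQ mask write
 * 0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX) therefore unmasks interrupt
 * bits 0..TEGRA_SYSTEM_DMA_CH_MAX only.
 */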
int __init tegra_dma_init(void)
{
        int ret = 0;
        int i;
        unsigned int irq;
        void __iomem *addr;

        addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        writel(GEN_ENABLE, addr + APB_DMA_GEN);
        writel(0, addr + APB_DMA_CNTRL);
        writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
                addr + APB_DMA_IRQ_MASK_SET);

        memset(channel_usage, 0, sizeof(channel_usage));
        memset(dma_channels, 0, sizeof(dma_channels));

        /* Reserve all the channels we are not supposed to touch */
        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
                __set_bit(i, channel_usage);

        for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];

                __clear_bit(i, channel_usage);
                ch->id = i;
                snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

                ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                spin_lock_init(&ch->lock);
                INIT_LIST_HEAD(&ch->list);
                tegra_dma_init_hw(ch);

                irq = INT_APB_DMA_CH0 + i;
                ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
                        dma_channels[i].name, ch);
                if (ret) {
                        pr_err("Failed to register IRQ %d for DMA %d\n",
                                irq, i);
                        goto fail;
                }
                ch->irq = irq;
        }
        /* mark the shared channel allocated */
        __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

        for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
                __set_bit(i, channel_usage);

        return ret;
fail:
        writel(0, addr + APB_DMA_GEN);
        for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];
                if (ch->irq)
                        free_irq(ch->irq, ch);
        }
        return ret;
}
#ifdef CONFIG_PM
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
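
/*
 * Suspend context layout: 3 global registers (GEN, CNTRL, IRQ_MASK) followed
 * by 5 registers (CSR, AHB_PTR, AHB_SEQ, APB_PTR, APB_SEQ) for each of the
 * TEGRA_SYSTEM_DMA_CH_NR channels, matching the array size above.
 */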
void tegra_dma_suspend(void)
{
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        u32 *ctx = apb_dma;
        int i;

        *ctx++ = readl(addr + APB_DMA_GEN);
        *ctx++ = readl(addr + APB_DMA_CNTRL);
        *ctx++ = readl(addr + APB_DMA_IRQ_MASK);

        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
                addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
                *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
                *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
                *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
                *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
        }
}

void tegra_dma_resume(void)
{
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        u32 *ctx = apb_dma;
        int i;

        writel(*ctx++, addr + APB_DMA_GEN);
        writel(*ctx++, addr + APB_DMA_CNTRL);
        writel(*ctx++, addr + APB_DMA_IRQ_MASK);

        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
                addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                writel(*ctx++, addr + APB_DMA_CHAN_CSR);
                writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
                writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
                writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
                writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
        }
}

#endif