/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>
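
/*
 * Register layout: the APB_DMA_* offsets (GEN, CNTRL, IRQ_MASK*) are
 * relative to the controller base, while the APB_DMA_CHAN_* offsets are
 * relative to each channel's own register window (see the per-channel
 * ch->addr setup in tegra_dma_init()).
 */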
#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010
#define APB_DMA_IRQ_MASK		0x01c
#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000
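
/*
 * The SEQ register wrap and bus-width fields take a 3-bit index rather
 * than a value; these tables map the requested wrap/width value to the
 * index that gets programmed (see tegra_dma_update_hw()).
 */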
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

static const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	int			req_transfer_count;
};
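
/*
 * Each channel's request list and registers are protected by ch->lock,
 * taken with interrupts disabled; channel allocation and the
 * channel_usage bitmap are serialized by tegra_dma_lock.
 */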
#define NV_DMA_MAX_CHANNELS 32

static DEFINE_MUTEX(tegra_dma_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
}
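
/*
 * Quiesce a channel: mask its end-of-count interrupt, clear the enable
 * bit, then ack any EOC status that latched while stopping so a stale
 * interrupt is not serviced later.
 */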
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
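
/*
 * Note on the arithmetic below: both the WCOUNT field programmed into
 * the CSR and the COUNT field read back from the status register are
 * expressed in 32-bit words minus one, hence the "+ 1" adjustments and
 * the final multiply by four to convert to bytes.
 */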
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, the DMA only tracks the count for
	 * half of the buffer. So if the DMA has already finished half the
	 * buffer, add that half to the completed count.
	 *
	 * FIXME: There can be a race here. What if the request is dequeued
	 * just as the DMA moves to the new buffer, before software has
	 * received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	is_empty = list_empty(&ch->list);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
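
/*
 * Minimal usage sketch (illustrative only): a client allocates a
 * one-shot channel, describes a single FIFO-to-memory transfer in a
 * tegra_dma_req (field names as used throughout this file; the
 * addresses and request selector below are hypothetical and error
 * handling is elided), then enqueues it and waits for the completion
 * callback, which runs without the channel lock held:
 *
 *	static void xfer_done(struct tegra_dma_req *req)
 *	{
 *		pr_debug("status %d, %d bytes\n", req->status,
 *			 req->bytes_transferred);
 *	}
 *
 *	struct tegra_dma_channel *ch =
 *		tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *
 *	req.to_memory = 1;		// APB FIFO -> memory
 *	req.source_addr = fifo_phys;	// hypothetical physical addresses
 *	req.dest_addr = buf_phys;
 *	req.size = 64;			// bytes, word aligned
 *	req.source_bus_width = 32;	// must appear in bus_width_table
 *	req.dest_bus_width = 32;
 *	req.source_wrap = 0;		// 0 == no address wrap
 *	req.dest_wrap = 0;
 *	req.req_sel = <peripheral request selector>;
 *	req.complete = xfer_done;
 *	tegra_dma_enqueue_req(ch, &req);
 */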

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
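
/*
 * Reprogram only the APB and AHB buffer pointers, leaving the channel
 * running; the continuous-mode handler uses this to slide the hardware
 * onto the next request at the half-buffer interrupt without stopping
 * the transfer.
 */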
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
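
/*
 * Program a channel for a new request. WCOUNT is in words minus one:
 * a 64-byte one-shot request, for example, is programmed as
 * 64/4 - 1 = 15, and the completion handler recovers the byte count as
 * (15 + 1) * 4. In continuous (double-buffered) mode the count is
 * halved again so that an interrupt fires for each half of the buffer.
 */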
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One-shot mode is always single buffered;
	 * continuous mode is always double buffered.
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full.
		 */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;
	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		 * another req, in which case the DMA has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
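
/*
 * Continuous (double-buffered) mode raises two interrupts per request:
 * one when the first half of the buffer completes (buffer_status goes
 * EMPTY -> HALF_FULL and the client's threshold callback runs) and one
 * when the second half completes (HALF_FULL -> FULL, the request is
 * retired and complete() runs). The STA_PING_PONG bit indicates which
 * half the hardware is on and is used to detect falling out of sync.
 */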
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;

			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}

			/* Load the next request into the hardware, if
			 * available.
			 */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt).
			 */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;
		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
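
/*
 * Hard IRQ handler: just ack the end-of-count status and wake the
 * threaded handler, which walks the request list and runs callbacks.
 */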
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
		addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);
		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX + 1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}

#ifdef CONFIG_PM
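/*
 * Suspend context: the three global registers (GEN, CNTRL, IRQ_MASK)
 * followed by five registers (CSR, AHB_PTR, AHB_SEQ, APB_PTR, APB_SEQ)
 * for each of the TEGRA_SYSTEM_DMA_CH_NR channels.
 */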
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}
#endif