
/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)

#define APB_DMA_CNTRL 0x010
#define APB_DMA_IRQ_MASK 0x01c
#define APB_DMA_IRQ_MASK_SET 0x020

#define APB_DMA_CHAN_CSR 0x000
#define CSR_ENB (1<<31)
#define CSR_IE_EOC (1<<30)
#define CSR_HOLD (1<<29)
#define CSR_DIR (1<<28)
#define CSR_ONCE (1<<27)
#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC

#define APB_DMA_CHAN_STA 0x004
#define STA_BUSY (1<<31)
#define STA_ISE_EOC (1<<30)
#define STA_HALT (1<<29)
#define STA_PING_PONG (1<<28)
#define STA_COUNT_SHIFT 2
#define STA_COUNT_MASK 0xFFFC

#define APB_DMA_CHAN_AHB_PTR 0x010

#define APB_DMA_CHAN_AHB_SEQ 0x014
#define AHB_SEQ_INTR_ENB (1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT 28
#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP (1<<27)
#define AHB_SEQ_BURST_MASK (0x7<<24)
#define AHB_SEQ_BURST_1 (4<<24)
#define AHB_SEQ_BURST_4 (5<<24)
#define AHB_SEQ_BURST_8 (6<<24)
#define AHB_SEQ_DBL_BUF (1<<19)
#define AHB_SEQ_WRAP_SHIFT 16
#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR 0x018

#define APB_DMA_CHAN_APB_SEQ 0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT 28
#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP (1<<27)
#define APB_SEQ_WRAP_SHIFT 16
#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR 16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
#define TEGRA_SYSTEM_DMA_CH_MIN 0
#define TEGRA_SYSTEM_DMA_CH_MAX \
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000

const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16

struct tegra_dma_channel {
	struct list_head list;
	int id;
	spinlock_t lock;
	char name[TEGRA_DMA_NAME_SIZE];
	void __iomem *addr;
	int mode;
	int irq;
	int req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static DEFINE_MUTEX(tegra_dma_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
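
/* Currently a no-op stub; exported so DMA clients can call it. */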
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
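
/* Remove and cancel the request at the head of the channel's queue. */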
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}
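
/*
 * Disable the channel's EOC interrupt, clear the enable bit and ack any
 * pending EOC status.  Callers in this file hold ch->lock.
 */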
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}
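
/*
 * Drop every queued request, point the channel at the invalid request
 * selector and stop it.  Dropped requests are not completed, so no
 * completion callbacks are invoked.
 */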
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
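
/*
 * Remove a specific request from the channel's queue.  If it is currently
 * being transferred, the channel is stopped and the number of bytes already
 * moved is reported in req->bytes_transferred.  Returns 0 whether or not the
 * request was found on the queue.
 */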
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 *
	 * FIXME: There can be a race here. What if the request to
	 * dequeue happens at the same time as the DMA just moved to
	 * the new buffer and SW hasn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
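
/*
 * Add a request to the tail of the channel's queue.  The transfer size must
 * not exceed NV_DMA_MAX_TRASFER_SIZE and both addresses must be word
 * aligned.  If the queue was empty, the hardware is programmed immediately.
 */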
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;

	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
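
/*
 * Allocate a DMA channel.  TEGRA_DMA_SHARED requests always get the shared
 * channel (TEGRA_SYSTEM_DMA_CH_MIN); otherwise the first free channel is
 * claimed.  Returns NULL if no channel is available.
 */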
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
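
/*
 * Reprogram only the APB and AHB address pointers for the next buffer; the
 * rest of the channel setup is left untouched.  Used by the continuous
 * (double-buffered) completion path.
 */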
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}
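
/*
 * Fully program the channel for a request: direction, word count, address
 * wrap, bus widths and request selector, then set the enable bit to start
 * the transfer.
 */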
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;
	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
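
/*
 * Completion handling for one-shot transfers: the head request is removed,
 * marked successful with its full byte count, and its completion callback is
 * invoked with the channel lock dropped.  The next queued request, if any,
 * is then started.
 */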
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another request, in which case the DMA has already been
		   started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
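
/*
 * Completion handling for continuous (double-buffered) transfers.  The first
 * interrupt marks the buffer half full and, if possible, points the hardware
 * at the next request; the second interrupt marks it full and completes the
 * request.  If the ping-pong status shows the driver fell out of sync, the
 * current request is completed immediately and the channel restarted.
 */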
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;

			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}
				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}

			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e. on
			 * the second interrupt) */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;
		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
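
/*
 * Hard IRQ handler: acknowledge the EOC status and defer the real work to
 * the threaded handler; spurious interrupts are reported and dropped.
 */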
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}
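
/*
 * Enable the APB DMA block, program the interrupt mask for the channels this
 * driver owns, initialise per-channel state and register a threaded IRQ
 * handler for each channel.  Channels outside the CH_MIN..CH_MAX range
 * (including those left to the AVP) are marked as already in use.
 */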
int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	/* initialised up front so the error path can safely disable the block */
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	struct clk *c;

	c = clk_get_sys("tegra-dma", NULL);
	if (IS_ERR(c)) {
		pr_err("Unable to get clock for APB DMA\n");
		ret = PTR_ERR(c);
		goto fail;
	}

	ret = clk_enable(c);
	if (ret != 0) {
		pr_err("Unable to enable clock for APB DMA\n");
		goto fail;
	}

	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
		addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);
		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
postcore_initcall(tegra_dma_init);

#ifdef CONFIG_PM
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
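
/*
 * Save the three global registers and the five per-channel registers (CSR,
 * AHB/APB pointers and sequencers) into apb_dma[] across suspend.
 */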
void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}
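
/* Restore the registers saved by tegra_dma_suspend(), in the same order. */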
void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif