/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020
/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC
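
/*
 * The WCOUNT field in CSR bits [15:2] appears to hold the transfer length
 * as (number of 32-bit words - 1): for a word-aligned length of "len"
 * bytes, the prep routines below program it as
 * (len - 4) & TEGRA_APBDMA_CSR_WCOUNT_MASK.
 */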
/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/*
 * If a burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and the DMA
 * status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20
struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data.
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by the controller.
 * @support_channel_pause: Whether channel-wise pause of DMA is supported.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
	bool support_channel_pause;
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long csr;
	unsigned long ahb_ptr;
	unsigned long apb_ptr;
	unsigned long ahb_seq;
	unsigned long apb_seq;
};
/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This holds the parameters for one sub-transfer. A client's transfer
 * request can be broken into multiple sub-transfers, depending on the
 * requester details and hardware support. Each sub-transfer is added to
 * the transfer list and points back to the Tegra DMA descriptor that
 * manages the overall transfer.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	int req_len;
	bool configured;
	bool last_sg;
	bool half_done;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor which manages a client request.
 * This descriptor keeps track of the transfer status, callbacks,
 * request counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	int bytes_requested;
	int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	int cb_count;
};
struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[30];
	bool config_init;
	int id;
	int irq;
	unsigned long chan_base_offset;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for the bottom half of ISR handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;
	dma_async_tx_callback callback;
	void *callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32 reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};
static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);
/* Get a DMA desc from the free list; if none is ready, allocate a new one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a descriptor that is still waiting for its ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}
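
/*
 * Descriptor lifetime note: completed descriptors are parked on
 * free_dma_desc but are only reused once the client has acked them
 * (async_tx_test_ack() in tegra_dma_desc_get() above), so their status
 * can still be queried by tegra_dma_tx_status() after completion.
 */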
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
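
/*
 * Client usage (illustrative sketch only): a slave driver would typically
 * reach the hooks in this file through the generic dmaengine API, roughly
 * as follows. Names such as "my_dev_rx_fifo" and "my_done" are
 * placeholders, not part of this driver.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= my_dev_rx_fifo,	// APB FIFO address
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);	// -> tegra_dma_slave_config()
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = my_done;		// runs from the tasklet
 *	dmaengine_submit(txd);			// -> tegra_dma_tx_submit()
 *	dma_async_issue_pending(chan);		// -> tegra_dma_issue_pending()
 */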
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}
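
/*
 * Pairing note: tegra_dma_global_pause() returns with tdma->global_lock
 * held and tegra_dma_global_resume() releases it, so the two must always
 * be called as a strict pair, as tegra_dma_pause()/tegra_dma_resume()
 * below do on chips without per-channel pause support.
 */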
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}
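
/*
 * Note: tegra_dma_stop() masks IE_EOC before clearing ENB and then clears
 * any already-latched EOC status, so a transfer completing exactly while
 * being stopped does not leave a stale interrupt pending.
 */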
static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If the EOC status is not set, the last burst has not completed
	 * yet. The last burst may also be in flight: it can then complete,
	 * but because the DMA is paused it will neither raise an interrupt
	 * nor reload the new configuration.
	 * If the EOC status is already set, the interrupt handler needs to
	 * load the new configuration instead.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing; the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
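
/*
 * get_current_xferred_count(): the STATUS count field seems to mirror the
 * CSR WCOUNT encoding, holding the remaining length as (words - 1) << 2.
 * Example: with req_len = 64 and 4 words (16 bytes) still outstanding, the
 * field reads 12, giving 64 - 12 - 4 = 48 bytes already transferred.
 */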
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
	return;
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
	return;
}
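
/*
 * Bottom half: the ISR only advances the hardware queue; client callbacks
 * run from this tasklet. cb_count accumulates one tick per completion, so
 * if the tasklet is delayed across several cyclic periods the callback is
 * still invoked once for every elapsed period.
 */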
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
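
/*
 * Top half: acknowledge EOC, let the mode-specific handler
 * (handle_once_dma_done() or handle_cont_sngl_cycle_dma_done()) advance
 * the request queue under the channel lock, then schedule the tasklet
 * above for the client callbacks.
 */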
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time before configuring the DMA
			 * for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
	return;
}
static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
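
/*
 * Residue note for tegra_dma_tx_status() below: bytes_transferred keeps
 * growing across periods of a cyclic transfer, so the residue is taken
 * modulo bytes_requested. Example: for a 4 KiB cyclic buffer with 9 KiB
 * transferred so far, residue = 4096 - (9216 % 4096) = 3072 bytes.
 */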
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}
static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
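
/*
 * Worked example for get_burst_size() below: a client burst of 8 units on
 * a 16-bit bus is 16 bytes, i.e. 4 AHB words, selecting AHBSEQ_BURST_4.
 * With maxburst of 0 the burst is derived from the length instead: any
 * length that is not 16-byte aligned gives BURST_1, 48 (0x30) gives
 * BURST_4 and 64 (0x40) gives BURST_8.
 */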
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this request does not conflict with the currently
	 * configured transfer mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * Any number of requests can be queued as long as the DMA has not
	 * been started. The driver loops over all requests. Once the DMA
	 * has been started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * Cyclic transfers are only supported when buf_len is a multiple
	 * of period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this request does not conflict with the currently
	 * configured transfer mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
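
/*
 * Device tree usage (illustrative sketch only; the register address,
 * interrupt numbers and clock index below are examples, consult the
 * matching tegra*.dtsi for the real values):
 *
 *	apbdma: dma@6000a000 {
 *		compatible = "nvidia,tegra20-apbdma";
 *		reg = <0x6000a000 0x1200>;
 *		interrupts = <0 104 0x04>,	// one per channel
 *			     <0 105 0x04>;
 *		clocks = <&tegra_car 34>;
 *	};
 */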
static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;
	const struct of_device_id *match;

	match = of_match_device(tegra_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable clock before accessing registers */
	ret = tegra_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
	}

	/* Disable clock */
	tegra_dma_runtime_suspend(dev);
	return 0;
}

static int tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable clock before accessing registers */
	ret = tegra_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	/* Disable clock */
	tegra_dma_runtime_suspend(dev);
	return 0;
}
#endif
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner = THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");