sa11x0-dma.c

/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define NR_PHY_CHAN     6
#define DMA_ALIGN       3
#define DMA_MAX_SIZE    0x1fff
#define DMA_CHUNK_SIZE  0x1000

#define DMA_DDAR        0x00
#define DMA_DCSR_S      0x04
#define DMA_DCSR_C      0x08
#define DMA_DCSR_R      0x0c
#define DMA_DBSA        0x10
#define DMA_DBTA        0x14
#define DMA_DBSB        0x18
#define DMA_DBTB        0x1c
#define DMA_SIZE        0x20

#define DCSR_RUN        (1 << 0)
#define DCSR_IE         (1 << 1)
#define DCSR_ERROR      (1 << 2)
#define DCSR_DONEA      (1 << 3)
#define DCSR_STRTA      (1 << 4)
#define DCSR_DONEB      (1 << 5)
#define DCSR_STRTB      (1 << 6)
#define DCSR_BIU        (1 << 7)

#define DDAR_RW         (1 << 0)        /* 0 = W, 1 = R */
#define DDAR_E          (1 << 1)        /* 0 = LE, 1 = BE */
#define DDAR_BS         (1 << 2)        /* 0 = BS4, 1 = BS8 */
#define DDAR_DW         (1 << 3)        /* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr  (0x0 << 4)
#define DDAR_Ser0UDCRc  (0x1 << 4)
#define DDAR_Ser1SDLCTr (0x2 << 4)
#define DDAR_Ser1SDLCRc (0x3 << 4)
#define DDAR_Ser1UARTTr (0x4 << 4)
#define DDAR_Ser1UARTRc (0x5 << 4)
#define DDAR_Ser2ICPTr  (0x6 << 4)
#define DDAR_Ser2ICPRc  (0x7 << 4)
#define DDAR_Ser3UARTTr (0x8 << 4)
#define DDAR_Ser3UARTRc (0x9 << 4)
#define DDAR_Ser4MCP0Tr (0xa << 4)
#define DDAR_Ser4MCP0Rc (0xb << 4)
#define DDAR_Ser4MCP1Tr (0xc << 4)
#define DDAR_Ser4MCP1Rc (0xd << 4)
#define DDAR_Ser4SSPTr  (0xe << 4)
#define DDAR_Ser4SSPRc  (0xf << 4)
struct sa11x0_dma_sg {
        u32                     addr;
        u32                     len;
};

struct sa11x0_dma_desc {
        struct virt_dma_desc    vd;

        u32                     ddar;
        size_t                  size;
        unsigned                sglen;
        struct sa11x0_dma_sg    sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
        struct virt_dma_chan    vc;

        /* protected by c->vc.lock */
        struct sa11x0_dma_phy   *phy;
        enum dma_status         status;

        /* protected by d->lock */
        struct list_head        node;

        u32                     ddar;
        const char              *name;
};

struct sa11x0_dma_phy {
        void __iomem            *base;
        struct sa11x0_dma_dev   *dev;
        unsigned                num;

        struct sa11x0_dma_chan  *vchan;

        /* Protected by c->vc.lock */
        unsigned                sg_load;
        struct sa11x0_dma_desc  *txd_load;
        unsigned                sg_done;
        struct sa11x0_dma_desc  *txd_done;
#ifdef CONFIG_PM_SLEEP
        u32                     dbs[2];
        u32                     dbt[2];
        u32                     dcsr;
#endif
};

struct sa11x0_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        spinlock_t              lock;
        struct tasklet_struct   task;
        struct list_head        chan_pending;
        struct sa11x0_dma_phy   phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
        return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
        list_del(&txd->vd.node);
        p->txd_load = txd;
        p->sg_load = 0;

        dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
                p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
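
/*
 * Load the next scatterlist entry into one of the pchan's two hardware
 * buffers.  The SA11x0 DMA controller double-buffers transfers between
 * buffer A and buffer B: while one buffer is transferring, the other can
 * be reloaded, so we pick whichever buffer is free and start it.
 */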
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
        struct sa11x0_dma_chan *c)
{
        struct sa11x0_dma_desc *txd = p->txd_load;
        struct sa11x0_dma_sg *sg;
        void __iomem *base = p->base;
        unsigned dbsx, dbtx;
        u32 dcsr;

        if (!txd)
                return;

        dcsr = readl_relaxed(base + DMA_DCSR_R);

        /* Don't try to load the next transfer if both buffers are started */
        if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
                return;

        if (p->sg_load == txd->sglen) {
                struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

                /*
                 * We have reached the end of the current descriptor.
                 * Peek at the next descriptor, and if compatible with
                 * the current, start processing it.
                 */
                if (txn && txn->ddar == txd->ddar) {
                        txd = txn;
                        sa11x0_dma_start_desc(p, txn);
                } else {
                        p->txd_load = NULL;
                        return;
                }
        }

        sg = &txd->sg[p->sg_load++];

        /* Select buffer to load according to channel status */
        if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
            ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
                dbsx = DMA_DBSA;
                dbtx = DMA_DBTA;
                dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
        } else {
                dbsx = DMA_DBSB;
                dbtx = DMA_DBTB;
                dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
        }

        writel_relaxed(sg->addr, base + dbsx);
        writel_relaxed(sg->len, base + dbtx);
        writel(dcsr, base + DMA_DCSR_S);

        dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
                p->num, dcsr,
                'A' + (dbsx == DMA_DBSB), sg->addr,
                'A' + (dbtx == DMA_DBTB), sg->len);
}

static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
        struct sa11x0_dma_chan *c)
{
        struct sa11x0_dma_desc *txd = p->txd_done;

        if (++p->sg_done == txd->sglen) {
                vchan_cookie_complete(&txd->vd);

                p->sg_done = 0;
                p->txd_done = p->txd_load;

                if (!p->txd_done)
                        tasklet_schedule(&p->dev->task);
        }

        sa11x0_dma_start_sg(p, c);
}
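
/*
 * Each physical channel has its own interrupt, requested with the pchan
 * as dev_id.  DONEA and DONEB are handled identically: sa11x0_dma_complete()
 * only counts completed sg entries, so it does not need to know which of
 * the two buffers finished.
 */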
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
        struct sa11x0_dma_phy *p = dev_id;
        struct sa11x0_dma_dev *d = p->dev;
        struct sa11x0_dma_chan *c;
        u32 dcsr;

        dcsr = readl_relaxed(p->base + DMA_DCSR_R);
        if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
                return IRQ_NONE;

        /* Clear reported status bits */
        writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
                p->base + DMA_DCSR_C);

        dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

        if (dcsr & DCSR_ERROR) {
                dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
                        p->num, dcsr,
                        readl_relaxed(p->base + DMA_DDAR),
                        readl_relaxed(p->base + DMA_DBSA),
                        readl_relaxed(p->base + DMA_DBTA),
                        readl_relaxed(p->base + DMA_DBSB),
                        readl_relaxed(p->base + DMA_DBTB));
        }

        c = p->vchan;
        if (c) {
                unsigned long flags;

                spin_lock_irqsave(&c->vc.lock, flags);
                /*
                 * Now that we're holding the lock, check that the vchan
                 * really is associated with this pchan before touching the
                 * hardware.  This should always succeed, because we won't
                 * change p->vchan or c->phy while the channel is actively
                 * transferring.
                 */
                if (c->phy == p) {
                        if (dcsr & DCSR_DONEA)
                                sa11x0_dma_complete(p, c);
                        if (dcsr & DCSR_DONEB)
                                sa11x0_dma_complete(p, c);
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
        }

        return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
        struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

        /* If the issued list is empty, we have no further txds to process */
        if (txd) {
                struct sa11x0_dma_phy *p = c->phy;

                sa11x0_dma_start_desc(p, txd);
                p->txd_done = txd;
                p->sg_done = 0;

                /* The channel should not have any transfers started */
                WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
                        (DCSR_STRTA | DCSR_STRTB));

                /* Clear the run and start bits before changing DDAR */
                writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
                        p->base + DMA_DCSR_C);
                writel_relaxed(txd->ddar, p->base + DMA_DDAR);

                /* Try to start both buffers */
                sa11x0_dma_start_sg(p, c);
                sa11x0_dma_start_sg(p, c);
        }
}
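
/*
 * The tasklet runs in two passes: first, any pchan whose vchan has no
 * further work is released (c->phy and p->vchan cleared); second, free
 * pchans are handed out to vchans waiting on the chan_pending list and
 * the first descriptor is started on them.
 */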
static void sa11x0_dma_tasklet(unsigned long arg)
{
        struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
        struct sa11x0_dma_phy *p;
        struct sa11x0_dma_chan *c;
        unsigned pch, pch_alloc = 0;

        dev_dbg(d->slave.dev, "tasklet enter\n");

        list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && !p->txd_done) {
                        sa11x0_dma_start_txd(c);
                        if (!p->txd_done) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        spin_lock_irq(&d->lock);
        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct sa11x0_dma_chan, node);
                        list_del_init(&c->node);

                        pch_alloc |= 1 << pch;

                        /* Mark this channel allocated */
                        p->vchan = c;

                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;

                        spin_lock_irq(&c->vc.lock);
                        c->phy = p;

                        sa11x0_dma_start_txd(c);
                        spin_unlock_irq(&c->vc.lock);
                }
        }

        dev_dbg(d->slave.dev, "tasklet exit\n");
}

static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
        return 0;
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
}
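
/*
 * Pick the DBS register of the buffer the controller is currently working
 * from (decided from the BIU/STRTA/STRTB status bits) and read it back;
 * the result is used as the current transfer position when computing the
 * residue in sa11x0_dma_tx_status().
 */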
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
        unsigned reg;
        u32 dcsr;

        dcsr = readl_relaxed(p->base + DMA_DCSR_R);

        if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
            (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
                reg = DMA_DBSA;
        else
                reg = DMA_DBSB;

        return readl_relaxed(p->base + reg);
}
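
/*
 * The residue is the sum of: the bytes left in the sg entry currently
 * being transferred (position taken from sa11x0_dma_pos()), the untouched
 * sg entries of the in-flight descriptor, the loaded-but-not-yet-completed
 * descriptor (if different), and every descriptor still on the issued list.
 */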
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        struct sa11x0_dma_desc *txd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_SUCCESS)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;
        if (p) {
                dma_addr_t addr = sa11x0_dma_pos(p);

                dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

                txd = p->txd_done;
                if (txd) {
                        unsigned i;

                        for (i = 0; i < txd->sglen; i++) {
                                dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
                                        i, txd->sg[i].addr, txd->sg[i].len);
                                if (addr >= txd->sg[i].addr &&
                                    addr < txd->sg[i].addr + txd->sg[i].len) {
                                        unsigned len;

                                        len = txd->sg[i].len -
                                                (addr - txd->sg[i].addr);
                                        dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
                                                i, len);
                                        bytes += len;
                                        i++;
                                        break;
                                }
                        }
                        for (; i < txd->sglen; i++) {
                                dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
                                        i, txd->sg[i].addr, txd->sg[i].len);
                                bytes += txd->sg[i].len;
                        }
                }
                if (txd != p->txd_load && p->txd_load)
                        bytes += p->txd_load->size;
        }
        list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
                bytes += txd->size;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        if (state)
                state->residue = bytes;

        dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);

        return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc)) {
                if (!c->phy) {
                        spin_lock(&d->lock);
                        if (list_empty(&c->node)) {
                                list_add_tail(&c->node, &d->chan_pending);
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                        spin_unlock(&d->lock);
                }
        } else
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
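
/*
 * Each hardware buffer can transfer at most DMA_MAX_SIZE (0x1fff) bytes,
 * so scatterlist entries larger than that are split into roughly equal,
 * DMA_ALIGN-aligned chunks.  For example, a 0x3000 byte entry gives
 * mult = DIV_ROUND_UP(0x3000, 0x1ffc) = 2 and tlen = (0x3000 / 2) & ~3
 * = 0x1800, i.e. two 0x1800 byte chunks.
 */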
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_desc *txd;
        struct scatterlist *sgent;
        unsigned i, j = sglen;
        size_t size = 0;

        /* SA11x0 channels can only operate in their native direction */
        if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
                dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
                        &c->vc, c->ddar, dir);
                return NULL;
        }

        /* Do not allow zero-sized txds */
        if (sglen == 0)
                return NULL;

        for_each_sg(sg, sgent, sglen, i) {
                dma_addr_t addr = sg_dma_address(sgent);
                unsigned int len = sg_dma_len(sgent);

                if (len > DMA_MAX_SIZE)
                        j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
                if (addr & DMA_ALIGN) {
                        dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
                                &c->vc, addr);
                        return NULL;
                }
        }

        txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
        if (!txd) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
                return NULL;
        }

        j = 0;
        for_each_sg(sg, sgent, sglen, i) {
                dma_addr_t addr = sg_dma_address(sgent);
                unsigned len = sg_dma_len(sgent);

                size += len;

                do {
                        unsigned tlen = len;

                        /*
                         * Check whether the transfer will fit.  If not, try
                         * to split the transfer up such that we end up with
                         * equal chunks - but make sure that we preserve the
                         * alignment.  This avoids small segments.
                         */
                        if (tlen > DMA_MAX_SIZE) {
                                unsigned mult = DIV_ROUND_UP(tlen,
                                        DMA_MAX_SIZE & ~DMA_ALIGN);

                                tlen = (tlen / mult) & ~DMA_ALIGN;
                        }

                        txd->sg[j].addr = addr;
                        txd->sg[j].len = tlen;

                        addr += tlen;
                        len -= tlen;
                        j++;
                } while (len);
        }

        txd->ddar = c->ddar;
        txd->size = size;
        txd->sglen = j;

        dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
                &c->vc, &txd->vd, txd->size, txd->sglen);

        return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
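
/*
 * Fold the slave configuration into the channel's DDAR value: DDAR_DW
 * selects 16-bit transfers, DDAR_BS selects bursts of 8, and the device
 * FIFO address is encoded by keeping addr[31:28] in place and shifting
 * addr[21:2] up by 6 bits into the DDAR device address field.
 */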
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
        u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
        dma_addr_t addr;
        enum dma_slave_buswidth width;
        u32 maxburst;

        if (ddar & DDAR_RW) {
                addr = cfg->src_addr;
                width = cfg->src_addr_width;
                maxburst = cfg->src_maxburst;
        } else {
                addr = cfg->dst_addr;
                width = cfg->dst_addr_width;
                maxburst = cfg->dst_maxburst;
        }

        if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
             width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
            (maxburst != 4 && maxburst != 8))
                return -EINVAL;

        if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
                ddar |= DDAR_DW;
        if (maxburst == 8)
                ddar |= DDAR_BS;

        dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
                &c->vc, addr, width, maxburst);

        c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

        return 0;
}

static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        LIST_HEAD(head);
        unsigned long flags;
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

        case DMA_TERMINATE_ALL:
                dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
                /* Clear the tx descriptor lists */
                spin_lock_irqsave(&c->vc.lock, flags);
                vchan_get_all_descriptors(&c->vc, &head);

                p = c->phy;
                if (p) {
                        dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
                        /* vchan is assigned to a pchan - stop the channel */
                        writel(DCSR_RUN | DCSR_IE |
                               DCSR_STRTA | DCSR_DONEA |
                               DCSR_STRTB | DCSR_DONEB,
                               p->base + DMA_DCSR_C);

                        if (p->txd_load) {
                                if (p->txd_load != p->txd_done)
                                        list_add_tail(&p->txd_load->vd.node, &head);
                                p->txd_load = NULL;
                        }
                        if (p->txd_done) {
                                list_add_tail(&p->txd_done->vd.node, &head);
                                p->txd_done = NULL;
                        }

                        c->phy = NULL;
                        spin_lock(&d->lock);
                        p->vchan = NULL;
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
                vchan_dma_desc_free_list(&c->vc, &head);
                ret = 0;
                break;

        case DMA_PAUSE:
                dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
                spin_lock_irqsave(&c->vc.lock, flags);
                if (c->status == DMA_IN_PROGRESS) {
                        c->status = DMA_PAUSED;

                        p = c->phy;
                        if (p) {
                                writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
                        } else {
                                spin_lock(&d->lock);
                                list_del_init(&c->node);
                                spin_unlock(&d->lock);
                        }
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
                ret = 0;
                break;

        case DMA_RESUME:
                dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
                spin_lock_irqsave(&c->vc.lock, flags);
                if (c->status == DMA_PAUSED) {
                        c->status = DMA_IN_PROGRESS;

                        p = c->phy;
                        if (p) {
                                writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
                        } else if (!list_empty(&c->vc.desc_issued)) {
                                spin_lock(&d->lock);
                                list_add_tail(&c->node, &d->chan_pending);
                                spin_unlock(&d->lock);
                        }
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
                ret = 0;
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}
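
/*
 * Static description of the sixteen virtual channels: each entry pairs a
 * DDAR device-select value (plus DDAR_RW for receive channels) with the
 * name that sa11x0_dma_filter_fn() matches against.
 */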
struct sa11x0_dma_channel_desc {
        u32 ddar;
        const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
        CD(Ser0UDCTr, 0),
        CD(Ser0UDCRc, DDAR_RW),
        CD(Ser1SDLCTr, 0),
        CD(Ser1SDLCRc, DDAR_RW),
        CD(Ser1UARTTr, 0),
        CD(Ser1UARTRc, DDAR_RW),
        CD(Ser2ICPTr, 0),
        CD(Ser2ICPRc, DDAR_RW),
        CD(Ser3UARTTr, 0),
        CD(Ser3UARTRc, DDAR_RW),
        CD(Ser4MCP0Tr, 0),
        CD(Ser4MCP0Rc, DDAR_RW),
        CD(Ser4MCP1Tr, 0),
        CD(Ser4MCP1Rc, DDAR_RW),
        CD(Ser4SSPTr, 0),
        CD(Ser4SSPRc, DDAR_RW),
};

static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
        struct device *dev)
{
        unsigned i;

        dmadev->chancnt = ARRAY_SIZE(chan_desc);
        INIT_LIST_HEAD(&dmadev->channels);
        dmadev->dev = dev;
        dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
        dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
        dmadev->device_control = sa11x0_dma_control;
        dmadev->device_tx_status = sa11x0_dma_tx_status;
        dmadev->device_issue_pending = sa11x0_dma_issue_pending;

        for (i = 0; i < dmadev->chancnt; i++) {
                struct sa11x0_dma_chan *c;

                c = kzalloc(sizeof(*c), GFP_KERNEL);
                if (!c) {
                        dev_err(dev, "no memory for channel %u\n", i);
                        return -ENOMEM;
                }

                c->status = DMA_IN_PROGRESS;
                c->ddar = chan_desc[i].ddar;
                c->name = chan_desc[i].name;
                INIT_LIST_HEAD(&c->node);

                c->vc.desc_free = sa11x0_dma_free_desc;
                vchan_init(&c->vc, dmadev);
        }

        return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
        void *data)
{
        int irq = platform_get_irq(pdev, nr);

        if (irq <= 0)
                return -ENXIO;

        return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
        void *data)
{
        int irq = platform_get_irq(pdev, nr);

        if (irq > 0)
                free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
        struct sa11x0_dma_chan *c, *cn;

        list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
        struct sa11x0_dma_dev *d;
        struct resource *res;
        unsigned i;
        int ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENXIO;

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);

        d->base = ioremap(res->start, resource_size(res));
        if (!d->base) {
                ret = -ENOMEM;
                goto err_ioremap;
        }

        tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

        for (i = 0; i < NR_PHY_CHAN; i++) {
                struct sa11x0_dma_phy *p = &d->phy[i];

                p->dev = d;
                p->num = i;
                p->base = d->base + i * DMA_SIZE;
                writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
                        DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
                        p->base + DMA_DCSR_C);
                writel_relaxed(0, p->base + DMA_DDAR);

                ret = sa11x0_dma_request_irq(pdev, i, p);
                if (ret) {
                        while (i) {
                                i--;
                                sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
                        }
                        goto err_irq;
                }
        }

        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
        ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
        if (ret) {
                dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
                        ret);
                goto err_slave_reg;
        }

        platform_set_drvdata(pdev, d);
        return 0;

 err_slave_reg:
        sa11x0_dma_free_channels(&d->slave);
        for (i = 0; i < NR_PHY_CHAN; i++)
                sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
        tasklet_kill(&d->task);
        iounmap(d->base);
 err_ioremap:
        kfree(d);
 err_alloc:
        return ret;
}

static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
        struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
        unsigned pch;

        dma_async_device_unregister(&d->slave);

        sa11x0_dma_free_channels(&d->slave);
        for (pch = 0; pch < NR_PHY_CHAN; pch++)
                sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
        tasklet_kill(&d->task);
        iounmap(d->base);
        kfree(d);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
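/*
 * Save the pchan state so it can be reprogrammed after resume.  The buffer
 * registers are saved with the in-progress buffer first (slot 0), and the
 * STRTA/STRTB bits are swapped accordingly when buffer B was in use, so
 * sa11x0_dma_resume() can always restore slot 0 into buffer A.
 */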
static int sa11x0_dma_suspend(struct device *dev)
{
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
        unsigned pch;

        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                struct sa11x0_dma_phy *p = &d->phy[pch];
                u32 dcsr, saved_dcsr;

                dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
                if (dcsr & DCSR_RUN) {
                        writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
                        dcsr = readl_relaxed(p->base + DMA_DCSR_R);
                }

                saved_dcsr &= DCSR_RUN | DCSR_IE;
                if (dcsr & DCSR_BIU) {
                        p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
                        p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
                        p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
                        p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
                        saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
                                      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
                } else {
                        p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
                        p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
                        p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
                        p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
                        saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
                }
                p->dcsr = saved_dcsr;

                writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
        }

        return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
        unsigned pch;

        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                struct sa11x0_dma_phy *p = &d->phy[pch];
                struct sa11x0_dma_desc *txd = NULL;
                u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

                WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

                if (p->txd_done)
                        txd = p->txd_done;
                else if (p->txd_load)
                        txd = p->txd_load;

                if (!txd)
                        continue;

                writel_relaxed(txd->ddar, p->base + DMA_DDAR);

                writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
                writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
                writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
                writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
                writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
        }

        return 0;
}
#endif

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
        .suspend_noirq = sa11x0_dma_suspend,
        .resume_noirq = sa11x0_dma_resume,
        .freeze_noirq = sa11x0_dma_suspend,
        .thaw_noirq = sa11x0_dma_resume,
        .poweroff_noirq = sa11x0_dma_suspend,
        .restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
        .driver = {
                .name   = "sa11x0-dma",
                .owner  = THIS_MODULE,
                .pm     = &sa11x0_dma_pm_ops,
        },
        .probe          = sa11x0_dma_probe,
        .remove         = __devexit_p(sa11x0_dma_remove),
};
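
/*
 * Filter callback for dma_request_channel(): matches a virtual channel by
 * name.  A hypothetical caller, assuming the usual dmaengine request
 * sequence, might look like:
 *
 *      dma_cap_mask_t mask;
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 */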
bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
                struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
                const char *p = param;

                return !strcmp(c->name, p);
        }
        return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
        return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
        platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");