/* dma.c */
/* linux/arch/arm/plat-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * NOTE: Code in this file is not used when booting with Device Tree support.
 */
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/dmapool.h>
  21. #include <linux/device.h>
  22. #include <linux/errno.h>
  23. #include <linux/slab.h>
  24. #include <linux/delay.h>
  25. #include <linux/clk.h>
  26. #include <linux/err.h>
  27. #include <linux/io.h>
  28. #include <linux/amba/pl080.h>
  29. #include <linux/of.h>
  30. #include <mach/dma.h>
  31. #include <mach/map.h>
  32. #include <mach/irqs.h>
  33. #include "regs-sys.h"
  34. /* dma channel state information */
  35. struct s3c64xx_dmac {
  36. struct device dev;
  37. struct clk *clk;
  38. void __iomem *regs;
  39. struct s3c2410_dma_chan *channels;
  40. enum dma_ch chanbase;
  41. };
  42. /* pool to provide LLI buffers */
  43. static struct dma_pool *dma_pool;
  44. /* Debug configuration and code */
  45. static unsigned char debug_show_buffs = 0;
  46. static void dbg_showchan(struct s3c2410_dma_chan *chan)
  47. {
  48. pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
  49. chan->number,
  50. readl(chan->regs + PL080_CH_SRC_ADDR),
  51. readl(chan->regs + PL080_CH_DST_ADDR),
  52. readl(chan->regs + PL080_CH_LLI),
  53. readl(chan->regs + PL080_CH_CONTROL),
  54. readl(chan->regs + PL080S_CH_CONTROL2),
  55. readl(chan->regs + PL080S_CH_CONFIG));
  56. }
/* dump one pl080s linked-list item (addresses, chain link, control
 * words) to the debug log */
static void show_lli(struct pl080s_lli *lli)
{
	pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
		 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
		 lli->control0, lli->control1);
}
  63. static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
  64. {
  65. struct s3c64xx_dma_buff *ptr;
  66. struct s3c64xx_dma_buff *end;
  67. pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
  68. chan->number, chan->next, chan->curr, chan->end);
  69. ptr = chan->next;
  70. end = chan->end;
  71. if (debug_show_buffs) {
  72. for (; ptr != NULL; ptr = ptr->next) {
  73. pr_debug("DMA%d: %08x ",
  74. chan->number, ptr->lli_dma);
  75. show_lli(ptr->lli);
  76. }
  77. }
  78. }
  79. /* End of Debug */
  80. static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
  81. {
  82. struct s3c2410_dma_chan *chan;
  83. unsigned int start, offs;
  84. start = 0;
  85. if (channel >= DMACH_PCM1_TX)
  86. start = 8;
  87. for (offs = 0; offs < 8; offs++) {
  88. chan = &s3c2410_chans[start + offs];
  89. if (!chan->in_use)
  90. goto found;
  91. }
  92. return NULL;
  93. found:
  94. s3c_dma_chan_map[channel] = chan;
  95. return chan;
  96. }
  97. int s3c2410_dma_config(enum dma_ch channel, int xferunit)
  98. {
  99. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  100. if (chan == NULL)
  101. return -EINVAL;
  102. switch (xferunit) {
  103. case 1:
  104. chan->hw_width = 0;
  105. break;
  106. case 2:
  107. chan->hw_width = 1;
  108. break;
  109. case 4:
  110. chan->hw_width = 2;
  111. break;
  112. default:
  113. printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
  114. return -EINVAL;
  115. }
  116. return 0;
  117. }
  118. EXPORT_SYMBOL(s3c2410_dma_config);
/*
 * s3c64xx_dma_fill_lli - populate one PL080S linked-list item
 * @chan: channel supplying direction (chan->source), device address and
 *	  transfer width.
 * @lli: the descriptor to fill.
 * @data: DMA address of the memory-side buffer.
 * @size: transfer size in bytes (converted to transfer units below).
 *
 * The peripheral side always sits on AHB2 and its address is fixed;
 * the memory side is the incrementing address.  The direction was set
 * earlier by s3c2410_dma_devconfig().
 */
static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
				 struct pl080s_lli *lli,
				 dma_addr_t data, int size)
{
	dma_addr_t src, dst;
	u32 control0, control1;

	switch (chan->source) {
	case DMA_FROM_DEVICE:
		/* peripheral -> memory: device is the (fixed, AHB2) source,
		 * the memory destination pointer increments */
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= PL080_CONTROL_DST_INCR;
		break;

	case DMA_TO_DEVICE:
		/* memory -> peripheral: mirror of the above */
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
		control0 |= PL080_CONTROL_SRC_INCR;
		break;

	default:
		/* devconfig only ever stores FROM/TO, so this is fatal */
		BUG();
	}

	/* note, we do not currently setup any of the burst controls */

	control1 = size >> chan->hw_width;	/* size in no of xfers */
	control0 |= PL080_CONTROL_PROT_SYS;	/* always in priv. mode */
	control0 |= PL080_CONTROL_TC_IRQ_EN;	/* always fire IRQ */
	control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
	control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;

	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;	/* end of chain until enqueue links it */
	lli->control0 = control0;
	lli->control1 = control1;
}
/*
 * s3c64xx_lli_to_regs - load a channel's registers directly from an LLI
 *
 * Used when a transfer is queued onto an idle channel: the descriptor
 * fields are copied straight into the channel register block.  The
 * channel config/enable is written separately (s3c64xx_dma_start()).
 */
static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
				struct pl080s_lli *lli)
{
	void __iomem *regs = chan->regs;

	pr_debug("%s: LLI %p => regs\n", __func__, lli);
	show_lli(lli);

	writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
	writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
	writel(lli->next_lli, regs + PL080_CH_LLI);
	writel(lli->control0, regs + PL080_CH_CONTROL);
	writel(lli->control1, regs + PL080S_CH_CONTROL2);
}
/*
 * s3c64xx_dma_start - enable a channel and let it run
 *
 * Clears any stale terminal-count/error interrupt state for this
 * channel's bit, then sets ENABLE and clears HALT in the channel
 * config register.  The channel registers must already have been
 * loaded (s3c64xx_lli_to_regs()/s3c2410_dma_enqueue()).
 */
static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dmac *dmac = chan->dmac;
	u32 config;
	u32 bit = chan->bit;

	dbg_showchan(chan);

	pr_debug("%s: clearing interrupts\n", __func__);

	/* clear interrupts */
	writel(bit, dmac->regs + PL080_TC_CLEAR);
	writel(bit, dmac->regs + PL080_ERR_CLEAR);

	pr_debug("%s: starting channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_ENABLE;
	config &= ~PL080_CONFIG_HALT;

	pr_debug("%s: writing config %08x\n", __func__, config);
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
/*
 * s3c64xx_dma_stop - halt a channel and then disable it
 *
 * Sets HALT first so the channel can drain in-flight data, polls the
 * ACTIVE bit for up to 1000 * 10us, then clears ENABLE.  Returns
 * -EFAULT if the channel refuses to go inactive (kept for existing
 * callers rather than a more conventional -ETIMEDOUT).
 */
static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
	u32 config;
	int timeout;

	pr_debug("%s: stopping channel\n", __func__);

	dbg_showchan(chan);

	/* halt first: stops new requests while the FIFO drains */
	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_HALT;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	timeout = 1000;
	do {
		config = readl(chan->regs + PL080S_CH_CONFIG);
		pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
		if (config & PL080_CONFIG_ACTIVE)
			udelay(10);
		else
			break;
	} while (--timeout > 0);

	if (config & PL080_CONFIG_ACTIVE) {
		printk(KERN_ERR "%s: channel still active\n", __func__);
		return -EFAULT;
	}

	/* now safe to drop ENABLE */
	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
  210. static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
  211. struct s3c64xx_dma_buff *buf,
  212. enum s3c2410_dma_buffresult result)
  213. {
  214. if (chan->callback_fn != NULL)
  215. (chan->callback_fn)(chan, buf->pw, 0, result);
  216. }
/* release one queued buffer: LLI back to the pool, tracking struct freed */
static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
	dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
	kfree(buff);
}
/*
 * s3c64xx_dma_flush - abort everything queued on a channel
 *
 * Disables the channel, then walks the buffer list from 'curr',
 * completing each buffer with S3C2410_RES_ABORT and freeing it,
 * finally resetting all queue pointers to empty.
 */
static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *buff, *next;
	u32 config;

	dbg_showchan(chan);

	pr_debug("%s: flushing channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	/* dump all the buffers associated with this channel */

	for (buff = chan->curr; buff != NULL; buff = next) {
		next = buff->next;	/* save before buff is freed */
		pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);

		s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
		s3c64xx_dma_freebuff(buff);
	}

	chan->curr = chan->next = chan->end = NULL;

	return 0;
}
  241. int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
  242. {
  243. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  244. WARN_ON(!chan);
  245. if (!chan)
  246. return -EINVAL;
  247. switch (op) {
  248. case S3C2410_DMAOP_START:
  249. return s3c64xx_dma_start(chan);
  250. case S3C2410_DMAOP_STOP:
  251. return s3c64xx_dma_stop(chan);
  252. case S3C2410_DMAOP_FLUSH:
  253. return s3c64xx_dma_flush(chan);
  254. /* believe PAUSE/RESUME are no-ops */
  255. case S3C2410_DMAOP_PAUSE:
  256. case S3C2410_DMAOP_RESUME:
  257. case S3C2410_DMAOP_STARTED:
  258. case S3C2410_DMAOP_TIMEOUT:
  259. return 0;
  260. }
  261. return -ENOENT;
  262. }
  263. EXPORT_SYMBOL(s3c2410_dma_ctrl);
/* s3c2410_dma_enqueue
 *
 * Queue a buffer (@data/@size) for transfer on @channel, tagged with
 * the client cookie @id.  Builds an LLI from the pool, then either
 * chains it onto the end of the running queue (also updating the
 * hardware LLI register if the active buffer was the last one queued)
 * or, for an idle channel, loads the channel registers directly.  For
 * S3C2410_DMAF_CIRCULAR channels the new LLI is linked back to the
 * current head so the hardware loops.
 */
int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	unsigned long flags;
	int ret;

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	/* GFP_ATOMIC: callers may enqueue from atomic context */
	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		ret = -ENOMEM;
		goto err_buff;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	/* irqs off: the queue is also walked from the IRQ handler */
	local_irq_save(flags);

	if ((next = chan->next) != NULL) {
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enquing onto channel\n");

		/* link onto both the software list and the hardware chain */
		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		/* circular mode: close the hardware loop back to the head */
		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		/* the active buffer was the last queued one: point the
		 * hardware LLI register at the new buffer so it is picked
		 * up when the current transfer completes */
		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		pr_debug("enquing onto empty channel\n");

		/* idle channel: this buffer is head, next and tail at once */
		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	local_irq_restore(flags);

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;

err_buff:
	kfree(buff);
	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
/*
 * s3c2410_dma_devconfig - configure the device end of a channel
 * @channel: the channel to configure
 * @source: DMA_FROM_DEVICE or DMA_TO_DEVICE
 * @devaddr: bus address of the device's data register
 *
 * Records direction and device address for later LLI building and
 * programs the channel config with the peripheral request line and
 * flow-control mode.  The flow-control values follow the PL080
 * encoding: 1 = memory-to-peripheral, 2 = peripheral-to-memory, both
 * with the DMAC as flow controller.
 */
int s3c2410_dma_devconfig(enum dma_ch channel,
			  enum dma_data_direction source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	u32 peripheral;
	u32 config = 0;

	pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
		 __func__, channel, source, devaddr, chan);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	peripheral = (chan->peripheral & 0xf);	/* request-line number */
	chan->source = source;
	chan->dev_addr = devaddr;

	pr_debug("%s: peripheral %d\n", __func__, peripheral);

	switch (source) {
	case DMA_FROM_DEVICE:
		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
		break;

	case DMA_TO_DEVICE:
		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
		break;

	default:
		printk(KERN_ERR "%s: bad source\n", __func__);
		return -EINVAL;
	}

	/* allow TC and ERR interrupts */
	config |= PL080_CONFIG_TC_IRQ_MASK;
	config |= PL080_CONFIG_ERR_IRQ_MASK;

	pr_debug("%s: config %08x\n", __func__, config);

	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);
  366. int s3c2410_dma_getposition(enum dma_ch channel,
  367. dma_addr_t *src, dma_addr_t *dst)
  368. {
  369. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  370. WARN_ON(!chan);
  371. if (!chan)
  372. return -EINVAL;
  373. if (src != NULL)
  374. *src = readl(chan->regs + PL080_CH_SRC_ADDR);
  375. if (dst != NULL)
  376. *dst = readl(chan->regs + PL080_CH_DST_ADDR);
  377. return 0;
  378. }
  379. EXPORT_SYMBOL(s3c2410_dma_getposition);
/* s3c2410_request_dma
 *
 * get control of an dma channel
 *
 * Maps @channel onto a free hardware channel (with interrupts disabled
 * so concurrent claimants don't race the in_use flag) and marks it
 * claimed for @client.  Returns the hardware channel number tagged
 * with DMACH_LOW_LEVEL on success, -EBUSY when the bank is full.
 *
 * NOTE(review): @client is dereferenced for the debug print before any
 * NULL check - callers must always pass a valid client.
 */
int s3c2410_dma_request(enum dma_ch channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;

	pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	chan = s3c64xx_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;
	chan->peripheral = channel;
	chan->flags = 0;

	local_irq_restore(flags);

	/* need to setup */

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	return chan->number | DMACH_LOW_LEVEL;
}
EXPORT_SYMBOL(s3c2410_dma_request);
/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
 *
 * NOTE(review): despite the description above, no stop/flush is
 * actually performed here - the channel is only marked unused and
 * unmapped (see the unimplemented comment in the body).
 */
int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	chan->client = NULL;
	chan->in_use = 0;

	/* low-level claims carry DMACH_LOW_LEVEL in the id and are not in
	 * the map, so only clear the map entry for plain channel ids */
	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_free);
/*
 * s3c64xx_dma_irq - per-controller interrupt handler
 *
 * Reads the terminal-count and error status once, then for every
 * channel with either bit set: acknowledges the interrupt, locates the
 * buffer that just completed, fires the client callback, frees the
 * buffer and advances 'curr' (non-circular channels only), and finally
 * advances the 'next' pointer.
 */
static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
	struct s3c64xx_dmac *dmac = pw;
	struct s3c2410_dma_chan *chan;
	enum s3c2410_dma_buffresult res;
	u32 tcstat, errstat;
	u32 bit;
	int offs;

	tcstat = readl(dmac->regs + PL080_TC_STATUS);
	errstat = readl(dmac->regs + PL080_ERR_STATUS);

	for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
		struct s3c64xx_dma_buff *buff;

		if (!(errstat & bit) && !(tcstat & bit))
			continue;

		chan = dmac->channels + offs;
		res = S3C2410_RES_ERR;

		/* ack whichever condition(s) fired; a TC makes the result OK
		 * even if an error was also flagged */
		if (tcstat & bit) {
			writel(bit, dmac->regs + PL080_TC_CLEAR);
			res = S3C2410_RES_OK;
		}

		if (errstat & bit)
			writel(bit, dmac->regs + PL080_ERR_CLEAR);

		/* 'next' points to the buffer that is next to the
		 * currently active buffer.
		 * For CIRCULAR queues, 'next' will be same as 'curr'
		 * when 'end' is the active buffer.
		 */
		/* walk from curr to the buffer just completed: the one
		 * immediately before 'next' in the list */
		buff = chan->curr;
		while (buff && buff != chan->next
				&& buff->next != chan->next)
			buff = buff->next;

		/* a completion IRQ with an empty queue is a driver bug */
		if (!buff)
			BUG();

		if (buff == chan->next)
			buff = chan->end;

		s3c64xx_dma_bufffdone(chan, buff, res);

		/* Free the node and update curr, if non-circular queue */
		if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
			chan->curr = buff->next;
			s3c64xx_dma_freebuff(buff);
		}

		/* Update 'next' */
		buff = chan->next;
		if (chan->next == chan->end) {
			/* wrapped: restart at head; non-circular queues are
			 * now drained, so clear the tail */
			chan->next = chan->curr;
			if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
				chan->end = NULL;
		} else {
			chan->next = buff->next;
		}
	}

	return IRQ_HANDLED;
}
/* subsystem bus that the per-controller devices register on in sysfs */
static struct bus_type dma_subsys = {
	.name		= "s3c64xx-dma",
	.dev_name	= "s3c64xx-dma",
};
  496. static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
  497. int irq, unsigned int base)
  498. {
  499. struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
  500. struct s3c64xx_dmac *dmac;
  501. char clkname[16];
  502. void __iomem *regs;
  503. void __iomem *regptr;
  504. int err, ch;
  505. dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
  506. if (!dmac) {
  507. printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
  508. return -ENOMEM;
  509. }
  510. dmac->dev.id = chno / 8;
  511. dmac->dev.bus = &dma_subsys;
  512. err = device_register(&dmac->dev);
  513. if (err) {
  514. printk(KERN_ERR "%s: failed to register device\n", __func__);
  515. goto err_alloc;
  516. }
  517. regs = ioremap(base, 0x200);
  518. if (!regs) {
  519. printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
  520. err = -ENXIO;
  521. goto err_dev;
  522. }
  523. snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
  524. dmac->clk = clk_get(NULL, clkname);
  525. if (IS_ERR(dmac->clk)) {
  526. printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
  527. err = PTR_ERR(dmac->clk);
  528. goto err_map;
  529. }
  530. clk_prepare_enable(dmac->clk);
  531. dmac->regs = regs;
  532. dmac->chanbase = chbase;
  533. dmac->channels = chptr;
  534. err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
  535. if (err < 0) {
  536. printk(KERN_ERR "%s: failed to get irq\n", __func__);
  537. goto err_clk;
  538. }
  539. regptr = regs + PL080_Cx_BASE(0);
  540. for (ch = 0; ch < 8; ch++, chptr++) {
  541. pr_debug("%s: registering DMA %d (%p)\n",
  542. __func__, chno + ch, regptr);
  543. chptr->bit = 1 << ch;
  544. chptr->number = chno + ch;
  545. chptr->dmac = dmac;
  546. chptr->regs = regptr;
  547. regptr += PL080_Cx_STRIDE;
  548. }
  549. /* for the moment, permanently enable the controller */
  550. writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
  551. printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
  552. irq, regs, chno, chno+8);
  553. return 0;
  554. err_clk:
  555. clk_disable_unprepare(dmac->clk);
  556. clk_put(dmac->clk);
  557. err_map:
  558. iounmap(regs);
  559. err_dev:
  560. device_unregister(&dmac->dev);
  561. err_alloc:
  562. kfree(dmac);
  563. return err;
  564. }
  565. static int __init s3c64xx_dma_init(void)
  566. {
  567. int ret;
  568. /* This driver is not supported when booting with device tree. */
  569. if (of_have_populated_dt())
  570. return -ENODEV;
  571. printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
  572. dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
  573. if (!dma_pool) {
  574. printk(KERN_ERR "%s: failed to create pool\n", __func__);
  575. return -ENOMEM;
  576. }
  577. ret = subsys_system_register(&dma_subsys, NULL);
  578. if (ret) {
  579. printk(KERN_ERR "%s: failed to create subsys\n", __func__);
  580. return -ENOMEM;
  581. }
  582. /* Set all DMA configuration to be DMA, not SDMA */
  583. writel(0xffffff, S3C64XX_SDMA_SEL);
  584. /* Register standard DMA controllers */
  585. s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
  586. s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
  587. return 0;
  588. }
  589. arch_initcall(s3c64xx_dma_init);