txx9aclc.c
/*
 * Generic TXx9 ACLC platform driver
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * Based on RBTX49xx patch from CELF patch archive.
 * (C) Copyright TOSHIBA CORPORATION 2004-2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "txx9aclc.h"

static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
	/*
	 * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
	 * needs more work for noncoherent MIPS.
	 */
	.info		  = SNDRV_PCM_INFO_INTERLEAVED |
			    SNDRV_PCM_INFO_BATCH |
			    SNDRV_PCM_INFO_PAUSE,
#ifdef __BIG_ENDIAN
	.formats	  = SNDRV_PCM_FMTBIT_S16_BE,
#else
	.formats	  = SNDRV_PCM_FMTBIT_S16_LE,
#endif
	.period_bytes_min = 1024,
	.period_bytes_max = 8 * 1024,
	.periods_min	  = 2,
	.periods_max	  = 4096,
	.buffer_bytes_max = 32 * 1024,
};

static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;
	int ret;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	dev_dbg(socdev->dev,
		"runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd "
		"runtime->min_align %ld\n",
		(unsigned long)runtime->dma_area,
		(unsigned long)runtime->dma_addr, runtime->dma_bytes,
		runtime->min_align);
	dev_dbg(socdev->dev,
		"periods %d period_bytes %d stream %d\n",
		params_periods(params), params_period_bytes(params),
		substream->stream);

	dmadata->substream = substream;
	dmadata->pos = 0;
	return 0;
}

static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

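/*
 * Split the DMA buffer into fragments for the descriptor chain: normally
 * one fragment per period, but if the buffer is a single period it is
 * split into two half-period fragments so that at least two descriptors
 * can be kept in flight.
 */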
static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;

	dmadata->dma_addr = runtime->dma_addr;
	dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
	dmadata->period_bytes = snd_pcm_lib_period_bytes(substream);
	if (dmadata->buffer_bytes == dmadata->period_bytes) {
		dmadata->frag_bytes = dmadata->period_bytes >> 1;
		dmadata->frags = 2;
	} else {
		dmadata->frag_bytes = dmadata->period_bytes;
		dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes;
	}
	dmadata->frag_count = 0;
	dmadata->pos = 0;
	return 0;
}

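/*
 * DMA completion callback.  It runs in interrupt context, where new
 * descriptors cannot be submitted, so it only accounts for the finished
 * transfer and defers refilling the chain to the tasklet.  A negative
 * frag_count marks a stream being (re)started, in which case the
 * completion is ignored.
 */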
static void txx9aclc_dma_complete(void *arg)
{
	struct txx9aclc_dmadata *dmadata = arg;
	unsigned long flags;

	/* dma completion handler cannot submit new operations */
	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count >= 0) {
		dmadata->dmacount--;
		BUG_ON(dmadata->dmacount < 0);
		tasklet_schedule(&dmadata->tasklet);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

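/*
 * Build a single-entry scatterlist covering one fragment and queue a
 * slave DMA descriptor for it, with txx9aclc_dma_complete() as the
 * completion callback.
 */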
static struct dma_async_tx_descriptor *
txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
{
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
		    dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
	sg_dma_address(&sg) = buf_dma_addr;
	desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
		dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		DMA_TO_DEVICE : DMA_FROM_DEVICE,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
		return NULL;
	}
	desc->callback = txx9aclc_dma_complete;
	desc->callback_param = dmadata;
	desc->tx_submit(desc);
	return desc;
}

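/*
 * Keep NR_DMA_CHAIN descriptors queued on the channel.  On the first run
 * after a trigger (frag_count < 0) any previous transfer is terminated,
 * the chain is primed with NR_DMA_CHAIN fragments and the FIFO DMA bit is
 * enabled via ACCTLEN.  On later runs the tasklet tops the chain back up
 * to NR_DMA_CHAIN, advances the buffer position, and reports elapsed
 * periods to ALSA.
 */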
#define NR_DMA_CHAIN		2

static void txx9aclc_dma_tasklet(unsigned long data)
{
	struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct snd_pcm_substream *substream = dmadata->substream;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count < 0) {
		struct txx9aclc_soc_device *dev =
			container_of(dmadata, struct txx9aclc_soc_device,
				     dmadata[substream->stream]);
		struct txx9aclc_plat_drvdata *drvdata =
			txx9aclc_get_plat_drvdata(dev);
		void __iomem *base = drvdata->base;

		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		/* first time */
		for (i = 0; i < NR_DMA_CHAIN; i++) {
			desc = txx9aclc_dma_submit(dmadata,
				dmadata->dma_addr + i * dmadata->frag_bytes);
			if (!desc)
				return;
		}
		dmadata->dmacount = NR_DMA_CHAIN;
		chan->device->device_issue_pending(chan);
		spin_lock_irqsave(&dmadata->dma_lock, flags);
		__raw_writel(ctlbit, base + ACCTLEN);
		dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		return;
	}
	BUG_ON(dmadata->dmacount >= NR_DMA_CHAIN);
	while (dmadata->dmacount < NR_DMA_CHAIN) {
		dmadata->dmacount++;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		desc = txx9aclc_dma_submit(dmadata,
			dmadata->dma_addr +
			dmadata->frag_count * dmadata->frag_bytes);
		if (!desc)
			return;
		chan->device->device_issue_pending(chan);

		spin_lock_irqsave(&dmadata->dma_lock, flags);
		dmadata->frag_count++;
		dmadata->frag_count %= dmadata->frags;
		dmadata->pos += dmadata->frag_bytes;
		dmadata->pos %= dmadata->buffer_bytes;
		if ((dmadata->frag_count * dmadata->frag_bytes) %
		    dmadata->period_bytes == 0)
			snd_pcm_period_elapsed(substream);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

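/*
 * START defers the actual DMA setup to the tasklet by marking frag_count
 * as -1; STOP/PAUSE_PUSH/SUSPEND and PAUSE_RELEASE/RESUME only toggle the
 * FIFO DMA enable bit for the stream's direction.
 */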
static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct txx9aclc_soc_device *dev =
		container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
	void __iomem *base = drvdata->base;
	unsigned long flags;
	int ret = 0;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		dmadata->frag_count = -1;
		tasklet_schedule(&dmadata->tasklet);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		__raw_writel(ctlbit, base + ACCTLDIS);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		__raw_writel(ctlbit, base + ACCTLEN);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
	return ret;
}

static snd_pcm_uframes_t
txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;

	return bytes_to_frames(substream->runtime, dmadata->pos);
}

static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct txx9aclc_soc_device *dev =
		container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
	struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
	int ret;

	ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware);
	if (ret)
		return ret;
	/* ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;
	substream->runtime->private_data = dmadata;
	return 0;
}

static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct dma_chan *chan = dmadata->dma_chan;

	dmadata->frag_count = -1;
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	return 0;
}

static struct snd_pcm_ops txx9aclc_pcm_ops = {
	.open		= txx9aclc_pcm_open,
	.close		= txx9aclc_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= txx9aclc_pcm_hw_params,
	.hw_free	= txx9aclc_pcm_hw_free,
	.prepare	= txx9aclc_pcm_prepare,
	.trigger	= txx9aclc_pcm_trigger,
	.pointer	= txx9aclc_pcm_pointer,
};

static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
			    struct snd_pcm *pcm)
{
	return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
		card->dev, 64 * 1024, 4 * 1024 * 1024);
}

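/*
 * dma_request_channel() filter: match the channel whose dmaengine device
 * name is "<dma resource name>.<dma resource start>" and attach the slave
 * configuration to it.
 */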
static bool filter(struct dma_chan *chan, void *param)
{
	struct txx9aclc_dmadata *dmadata = param;
	char *devname;
	bool found = false;

	devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
			    (int)dmadata->dma_res->start);
	if (strcmp(dev_name(chan->device->dev), devname) == 0) {
		chan->private = &dmadata->dma_slave;
		found = true;
	}
	kfree(devname);
	return found;
}

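/*
 * Fill in the txx9dmac slave parameters (the audio FIFO data register,
 * ACAUDODAT for playback or ACAUDIDAT for capture) and request a
 * DMA_SLAVE channel for this direction.
 */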
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
			     struct txx9aclc_dmadata *dmadata)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
	struct txx9dmac_slave *ds = &dmadata->dma_slave;
	dma_cap_mask_t mask;

	spin_lock_init(&dmadata->dma_lock);

	ds->reg_width = sizeof(u32);
	if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ds->tx_reg = drvdata->physbase + ACAUDODAT;
		ds->rx_reg = 0;
	} else {
		ds->tx_reg = 0;
		ds->rx_reg = drvdata->physbase + ACAUDIDAT;
	}

	/* Try to grab a DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dmadata->dma_chan = dma_request_channel(mask, filter, dmadata);
	if (!dmadata->dma_chan) {
		dev_err(dev->soc_dev.dev,
			"DMA channel for %s is not available\n",
			dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			"playback" : "capture");
		return -EBUSY;
	}
	tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
		     (unsigned long)dmadata);
	return 0;
}

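/*
 * Request one DMA channel per direction from the platform device's
 * IORESOURCE_DMA entries (index 0 = playback, 1 = capture).  On failure,
 * release whatever channels were already acquired.
 */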
static int txx9aclc_pcm_probe(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct txx9aclc_soc_device *dev =
		container_of(socdev, struct txx9aclc_soc_device, soc_dev);
	struct resource *r;
	int i;
	int ret;

	dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK;
	dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE;
	for (i = 0; i < 2; i++) {
		r = platform_get_resource(dev->aclc_pdev, IORESOURCE_DMA, i);
		if (!r) {
			ret = -EBUSY;
			goto exit;
		}
		dev->dmadata[i].dma_res = r;
		ret = txx9aclc_dma_init(dev, &dev->dmadata[i]);
		if (ret)
			goto exit;
	}
	return 0;

exit:
	for (i = 0; i < 2; i++) {
		if (dev->dmadata[i].dma_chan)
			dma_release_channel(dev->dmadata[i].dma_chan);
		dev->dmadata[i].dma_chan = NULL;
	}
	return ret;
}

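/* Shut both FIFO DMAs down, then terminate and release the channels. */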
static int txx9aclc_pcm_remove(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct txx9aclc_soc_device *dev =
		container_of(socdev, struct txx9aclc_soc_device, soc_dev);
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
	void __iomem *base = drvdata->base;
	int i;

	/* disable all FIFO DMAs */
	__raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS);
	/* dummy R/W to clear pending DMAREQ if any */
	__raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT);

	for (i = 0; i < 2; i++) {
		struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
		struct dma_chan *chan = dmadata->dma_chan;

		if (chan) {
			dmadata->frag_count = -1;
			chan->device->device_control(chan,
						     DMA_TERMINATE_ALL, 0);
			dma_release_channel(chan);
		}
		dev->dmadata[i].dma_chan = NULL;
	}
	return 0;
}

struct snd_soc_platform txx9aclc_soc_platform = {
	.name		= "txx9aclc-audio",
	.probe		= txx9aclc_pcm_probe,
	.remove		= txx9aclc_pcm_remove,
	.pcm_ops	= &txx9aclc_pcm_ops,
	.pcm_new	= txx9aclc_pcm_new,
	.pcm_free	= txx9aclc_pcm_free_dma_buffers,
};
EXPORT_SYMBOL_GPL(txx9aclc_soc_platform);

static int __init txx9aclc_soc_platform_init(void)
{
	return snd_soc_register_platform(&txx9aclc_soc_platform);
}

static void __exit txx9aclc_soc_platform_exit(void)
{
	snd_soc_unregister_platform(&txx9aclc_soc_platform);
}

module_init(txx9aclc_soc_platform_init);
module_exit(txx9aclc_soc_platform_exit);

MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver");
MODULE_LICENSE("GPL");