mpc5200_dma.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564
  1. /*
  2. * Freescale MPC5200 PSC DMA
  3. * ALSA SoC Platform driver
  4. *
  5. * Copyright (C) 2008 Secret Lab Technologies Ltd.
  6. * Copyright (C) 2009 Jon Smirl, Digispeaker
  7. */
  8. #include <linux/module.h>
  9. #include <linux/of_device.h>
  10. #include <sound/soc.h>
  11. #include <sysdev/bestcomm/bestcomm.h>
  12. #include <sysdev/bestcomm/gen_bd.h>
  13. #include <asm/mpc52xx_psc.h>
  14. #include "mpc5200_dma.h"
  15. /*
  16. * Interrupt handlers
  17. */
  18. static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma)
  19. {
  20. struct psc_dma *psc_dma = _psc_dma;
  21. struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
  22. u16 isr;
  23. isr = in_be16(&regs->mpc52xx_psc_isr);
  24. /* Playback underrun error */
  25. if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP))
  26. psc_dma->stats.underrun_count++;
  27. /* Capture overrun error */
  28. if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR))
  29. psc_dma->stats.overrun_count++;
  30. out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);
  31. return IRQ_HANDLED;
  32. }
  33. /**
  34. * psc_dma_bcom_enqueue_next_buffer - Enqueue another audio buffer
  35. * @s: pointer to stream private data structure
  36. *
  37. * Enqueues another audio period buffer into the bestcomm queue.
  38. *
  39. * Note: The routine must only be called when there is space available in
  40. * the queue. Otherwise the enqueue will fail and the audio ring buffer
  41. * will get out of sync
  42. */
  43. static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
  44. {
  45. struct bcom_bd *bd;
  46. /* Prepare and enqueue the next buffer descriptor */
  47. bd = bcom_prepare_next_buffer(s->bcom_task);
  48. bd->status = s->period_bytes;
  49. bd->data[0] = s->period_next_pt;
  50. bcom_submit_next_buffer(s->bcom_task, NULL);
  51. /* Update for next period */
  52. s->period_next_pt += s->period_bytes;
  53. if (s->period_next_pt >= s->period_end)
  54. s->period_next_pt = s->period_start;
  55. }
  56. static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
  57. {
  58. while (s->appl_ptr < s->runtime->control->appl_ptr) {
  59. if (bcom_queue_full(s->bcom_task))
  60. return;
  61. s->appl_ptr += s->period_size;
  62. psc_dma_bcom_enqueue_next_buffer(s);
  63. }
  64. }
  65. /* Bestcomm DMA irq handler */
  66. static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
  67. {
  68. struct psc_dma_stream *s = _psc_dma_stream;
  69. spin_lock(&s->psc_dma->lock);
  70. /* For each finished period, dequeue the completed period buffer
  71. * and enqueue a new one in it's place. */
  72. while (bcom_buffer_done(s->bcom_task)) {
  73. bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
  74. s->period_current_pt += s->period_bytes;
  75. if (s->period_current_pt >= s->period_end)
  76. s->period_current_pt = s->period_start;
  77. }
  78. psc_dma_bcom_enqueue_tx(s);
  79. spin_unlock(&s->psc_dma->lock);
  80. /* If the stream is active, then also inform the PCM middle layer
  81. * of the period finished event. */
  82. if (s->active)
  83. snd_pcm_period_elapsed(s->stream);
  84. return IRQ_HANDLED;
  85. }
  86. static irqreturn_t psc_dma_bcom_irq_rx(int irq, void *_psc_dma_stream)
  87. {
  88. struct psc_dma_stream *s = _psc_dma_stream;
  89. spin_lock(&s->psc_dma->lock);
  90. /* For each finished period, dequeue the completed period buffer
  91. * and enqueue a new one in it's place. */
  92. while (bcom_buffer_done(s->bcom_task)) {
  93. bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
  94. s->period_current_pt += s->period_bytes;
  95. if (s->period_current_pt >= s->period_end)
  96. s->period_current_pt = s->period_start;
  97. psc_dma_bcom_enqueue_next_buffer(s);
  98. }
  99. spin_unlock(&s->psc_dma->lock);
  100. /* If the stream is active, then also inform the PCM middle layer
  101. * of the period finished event. */
  102. if (s->active)
  103. snd_pcm_period_elapsed(s->stream);
  104. return IRQ_HANDLED;
  105. }
  106. static int psc_dma_hw_free(struct snd_pcm_substream *substream)
  107. {
  108. snd_pcm_set_runtime_buffer(substream, NULL);
  109. return 0;
  110. }
/**
 * psc_dma_trigger: start and stop the DMA transfer.
 *
 * This function is called by ALSA to start, stop, pause, and resume the DMA
 * transfer of data.
 *
 * Returns 0 on success, -EINVAL for unsupported commands.
 */
static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct psc_dma_stream *s;
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
	u16 imr;
	unsigned long flags;
	int i;

	/* Pick the per-direction stream state */
	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	dev_dbg(psc_dma->dev, "psc_dma_trigger(substream=%p, cmd=%i)"
		" stream_id=%i\n",
		substream, cmd, substream->pstr->stream);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* Record the ring-buffer geometry (byte sizes, physical
		 * start/end addresses) used by the bestcomm irq handlers */
		s->period_bytes = frames_to_bytes(runtime,
				runtime->period_size);
		s->period_start = virt_to_phys(runtime->dma_area);
		s->period_end = s->period_start +
				(s->period_bytes * runtime->periods);
		s->period_next_pt = s->period_start;
		s->period_current_pt = s->period_start;
		s->period_size = runtime->period_size;
		s->active = 1;

		/* track appl_ptr so that we have a better chance of detecting
		 * end of stream and not over running it.
		 */
		s->runtime = runtime;
		s->appl_ptr = s->runtime->control->appl_ptr -
				(runtime->period_size * runtime->periods);

		/* Fill up the bestcomm bd queue and enable DMA.
		 * This will begin filling the PSC's fifo.
		 */
		spin_lock_irqsave(&psc_dma->lock, flags);

		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
			/* Capture: pre-queue one descriptor per period */
			bcom_gen_bd_rx_reset(s->bcom_task);
			for (i = 0; i < runtime->periods; i++)
				if (!bcom_queue_full(s->bcom_task))
					psc_dma_bcom_enqueue_next_buffer(s);
		} else {
			/* Playback: queue only up to the appl_ptr */
			bcom_gen_bd_tx_reset(s->bcom_task);
			psc_dma_bcom_enqueue_tx(s);
		}

		bcom_enable(s->bcom_task);
		spin_unlock_irqrestore(&psc_dma->lock, flags);

		/* Clear any stale error status before the stream runs */
		out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

		break;

	case SNDRV_PCM_TRIGGER_STOP:
		/* Mark inactive first so the irq handlers stop reporting
		 * period-elapsed events, then quiesce the DMA task */
		s->active = 0;

		spin_lock_irqsave(&psc_dma->lock, flags);
		bcom_disable(s->bcom_task);
		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
			bcom_gen_bd_rx_reset(s->bcom_task);
		else
			bcom_gen_bd_tx_reset(s->bcom_task);
		spin_unlock_irqrestore(&psc_dma->lock, flags);

		break;

	default:
		dev_dbg(psc_dma->dev, "invalid command\n");
		return -EINVAL;
	}

	/* Update interrupt enable settings: enable the xrun status
	 * interrupt bits only for the directions currently active */
	imr = 0;
	if (psc_dma->playback.active)
		imr |= MPC52xx_PSC_IMR_TXEMP;
	if (psc_dma->capture.active)
		imr |= MPC52xx_PSC_IMR_ORERR;
	out_be16(&regs->isr_imr.imr, psc_dma->imr | imr);

	return 0;
}
/* ---------------------------------------------------------------------
 * The PSC DMA 'ASoC platform' driver
 *
 * Can be referenced by an 'ASoC machine' driver
 * This driver only deals with the audio bus; it doesn't have any
 * interaction with the attached codec
 */

/* Capabilities advertised to the ALSA PCM middle layer */
static const struct snd_pcm_hardware psc_dma_hardware = {
	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_BATCH,
	/* Big-endian sample formats only */
	.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |
		SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE,
	.rate_min = 8000,
	.rate_max = 48000,
	.channels_min = 1,		/* mono or stereo */
	.channels_max = 2,
	.period_bytes_max = 1024 * 1024,
	.period_bytes_min = 32,
	.periods_min = 2,
	.periods_max = 256,
	.buffer_bytes_max = 2 * 1024 * 1024,
	.fifo_size = 512,
};
  215. static int psc_dma_open(struct snd_pcm_substream *substream)
  216. {
  217. struct snd_pcm_runtime *runtime = substream->runtime;
  218. struct snd_soc_pcm_runtime *rtd = substream->private_data;
  219. struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
  220. struct psc_dma_stream *s;
  221. int rc;
  222. dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream);
  223. if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
  224. s = &psc_dma->capture;
  225. else
  226. s = &psc_dma->playback;
  227. snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware);
  228. rc = snd_pcm_hw_constraint_integer(runtime,
  229. SNDRV_PCM_HW_PARAM_PERIODS);
  230. if (rc < 0) {
  231. dev_err(substream->pcm->card->dev, "invalid buffer size\n");
  232. return rc;
  233. }
  234. s->stream = substream;
  235. return 0;
  236. }
  237. static int psc_dma_close(struct snd_pcm_substream *substream)
  238. {
  239. struct snd_soc_pcm_runtime *rtd = substream->private_data;
  240. struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
  241. struct psc_dma_stream *s;
  242. dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream);
  243. if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
  244. s = &psc_dma->capture;
  245. else
  246. s = &psc_dma->playback;
  247. if (!psc_dma->playback.active &&
  248. !psc_dma->capture.active) {
  249. /* Disable all interrupts and reset the PSC */
  250. out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
  251. out_8(&psc_dma->psc_regs->command, 4 << 4); /* reset error */
  252. }
  253. s->stream = NULL;
  254. return 0;
  255. }
  256. static snd_pcm_uframes_t
  257. psc_dma_pointer(struct snd_pcm_substream *substream)
  258. {
  259. struct snd_soc_pcm_runtime *rtd = substream->private_data;
  260. struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
  261. struct psc_dma_stream *s;
  262. dma_addr_t count;
  263. if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
  264. s = &psc_dma->capture;
  265. else
  266. s = &psc_dma->playback;
  267. count = s->period_current_pt - s->period_start;
  268. return bytes_to_frames(substream->runtime, count);
  269. }
  270. static int
  271. psc_dma_hw_params(struct snd_pcm_substream *substream,
  272. struct snd_pcm_hw_params *params)
  273. {
  274. snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
  275. return 0;
  276. }
/* PCM operations handed to the ASoC core via the platform struct */
static struct snd_pcm_ops psc_dma_ops = {
	.open = psc_dma_open,
	.close = psc_dma_close,
	.hw_free = psc_dma_hw_free,
	.ioctl = snd_pcm_lib_ioctl,	/* generic ioctl handler */
	.pointer = psc_dma_pointer,
	.trigger = psc_dma_trigger,
	.hw_params = psc_dma_hw_params,
};
  286. static u64 psc_dma_dmamask = 0xffffffff;
  287. static int psc_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
  288. struct snd_pcm *pcm)
  289. {
  290. struct snd_soc_pcm_runtime *rtd = pcm->private_data;
  291. struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
  292. size_t size = psc_dma_hardware.buffer_bytes_max;
  293. int rc = 0;
  294. dev_dbg(rtd->socdev->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
  295. card, dai, pcm);
  296. if (!card->dev->dma_mask)
  297. card->dev->dma_mask = &psc_dma_dmamask;
  298. if (!card->dev->coherent_dma_mask)
  299. card->dev->coherent_dma_mask = 0xffffffff;
  300. if (pcm->streams[0].substream) {
  301. rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
  302. size, &pcm->streams[0].substream->dma_buffer);
  303. if (rc)
  304. goto playback_alloc_err;
  305. }
  306. if (pcm->streams[1].substream) {
  307. rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
  308. size, &pcm->streams[1].substream->dma_buffer);
  309. if (rc)
  310. goto capture_alloc_err;
  311. }
  312. if (rtd->socdev->card->codec->ac97)
  313. rtd->socdev->card->codec->ac97->private_data = psc_dma;
  314. return 0;
  315. capture_alloc_err:
  316. if (pcm->streams[0].substream)
  317. snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
  318. playback_alloc_err:
  319. dev_err(card->dev, "Cannot allocate buffer(s)\n");
  320. return -ENOMEM;
  321. }
  322. static void psc_dma_free(struct snd_pcm *pcm)
  323. {
  324. struct snd_soc_pcm_runtime *rtd = pcm->private_data;
  325. struct snd_pcm_substream *substream;
  326. int stream;
  327. dev_dbg(rtd->socdev->dev, "psc_dma_free(pcm=%p)\n", pcm);
  328. for (stream = 0; stream < 2; stream++) {
  329. substream = pcm->streams[stream].substream;
  330. if (substream) {
  331. snd_dma_free_pages(&substream->dma_buffer);
  332. substream->dma_buffer.area = NULL;
  333. substream->dma_buffer.addr = 0;
  334. }
  335. }
  336. }
/* The 'ASoC platform' object referenced by machine drivers */
struct snd_soc_platform mpc5200_audio_dma_platform = {
	.name = "mpc5200-psc-audio",
	.pcm_ops = &psc_dma_ops,
	.pcm_new = &psc_dma_new,
	.pcm_free = &psc_dma_free,
};
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_platform);
  344. int mpc5200_audio_dma_create(struct of_device *op)
  345. {
  346. phys_addr_t fifo;
  347. struct psc_dma *psc_dma;
  348. struct resource res;
  349. int size, irq, rc;
  350. const __be32 *prop;
  351. void __iomem *regs;
  352. /* Fetch the registers and IRQ of the PSC */
  353. irq = irq_of_parse_and_map(op->node, 0);
  354. if (of_address_to_resource(op->node, 0, &res)) {
  355. dev_err(&op->dev, "Missing reg property\n");
  356. return -ENODEV;
  357. }
  358. regs = ioremap(res.start, 1 + res.end - res.start);
  359. if (!regs) {
  360. dev_err(&op->dev, "Could not map registers\n");
  361. return -ENODEV;
  362. }
  363. /* Allocate and initialize the driver private data */
  364. psc_dma = kzalloc(sizeof *psc_dma, GFP_KERNEL);
  365. if (!psc_dma) {
  366. iounmap(regs);
  367. return -ENOMEM;
  368. }
  369. /* Get the PSC ID */
  370. prop = of_get_property(op->node, "cell-index", &size);
  371. if (!prop || size < sizeof *prop)
  372. return -ENODEV;
  373. spin_lock_init(&psc_dma->lock);
  374. psc_dma->id = be32_to_cpu(*prop);
  375. psc_dma->irq = irq;
  376. psc_dma->psc_regs = regs;
  377. psc_dma->fifo_regs = regs + sizeof *psc_dma->psc_regs;
  378. psc_dma->dev = &op->dev;
  379. psc_dma->playback.psc_dma = psc_dma;
  380. psc_dma->capture.psc_dma = psc_dma;
  381. snprintf(psc_dma->name, sizeof psc_dma->name, "PSC%u", psc_dma->id);
  382. /* Find the address of the fifo data registers and setup the
  383. * DMA tasks */
  384. fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32);
  385. psc_dma->capture.bcom_task =
  386. bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512);
  387. psc_dma->playback.bcom_task =
  388. bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo);
  389. if (!psc_dma->capture.bcom_task ||
  390. !psc_dma->playback.bcom_task) {
  391. dev_err(&op->dev, "Could not allocate bestcomm tasks\n");
  392. iounmap(regs);
  393. kfree(psc_dma);
  394. return -ENODEV;
  395. }
  396. /* Disable all interrupts and reset the PSC */
  397. out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
  398. /* reset receiver */
  399. out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX);
  400. /* reset transmitter */
  401. out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX);
  402. /* reset error */
  403. out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT);
  404. /* reset mode */
  405. out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1);
  406. /* Set up mode register;
  407. * First write: RxRdy (FIFO Alarm) generates rx FIFO irq
  408. * Second write: register Normal mode for non loopback
  409. */
  410. out_8(&psc_dma->psc_regs->mode, 0);
  411. out_8(&psc_dma->psc_regs->mode, 0);
  412. /* Set the TX and RX fifo alarm thresholds */
  413. out_be16(&psc_dma->fifo_regs->rfalarm, 0x100);
  414. out_8(&psc_dma->fifo_regs->rfcntl, 0x4);
  415. out_be16(&psc_dma->fifo_regs->tfalarm, 0x100);
  416. out_8(&psc_dma->fifo_regs->tfcntl, 0x7);
  417. /* Lookup the IRQ numbers */
  418. psc_dma->playback.irq =
  419. bcom_get_task_irq(psc_dma->playback.bcom_task);
  420. psc_dma->capture.irq =
  421. bcom_get_task_irq(psc_dma->capture.bcom_task);
  422. rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED,
  423. "psc-dma-status", psc_dma);
  424. rc |= request_irq(psc_dma->capture.irq,
  425. &psc_dma_bcom_irq_rx, IRQF_SHARED,
  426. "psc-dma-capture", &psc_dma->capture);
  427. rc |= request_irq(psc_dma->playback.irq,
  428. &psc_dma_bcom_irq_tx, IRQF_SHARED,
  429. "psc-dma-playback", &psc_dma->playback);
  430. if (rc) {
  431. free_irq(psc_dma->irq, psc_dma);
  432. free_irq(psc_dma->capture.irq,
  433. &psc_dma->capture);
  434. free_irq(psc_dma->playback.irq,
  435. &psc_dma->playback);
  436. return -ENODEV;
  437. }
  438. /* Save what we've done so it can be found again later */
  439. dev_set_drvdata(&op->dev, psc_dma);
  440. /* Tell the ASoC OF helpers about it */
  441. return snd_soc_register_platform(&mpc5200_audio_dma_platform);
  442. }
  443. EXPORT_SYMBOL_GPL(mpc5200_audio_dma_create);
/**
 * mpc5200_audio_dma_destroy - tear down everything set up by
 * mpc5200_audio_dma_create()
 * @op: OF device of the PSC node
 *
 * Unregisters the platform first so no new streams can open, then
 * releases the bestcomm tasks, the irqs, the register mapping and the
 * private data, and clears the drvdata pointer.  Always returns 0.
 */
int mpc5200_audio_dma_destroy(struct of_device *op)
{
	struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);

	dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n");

	snd_soc_unregister_platform(&mpc5200_audio_dma_platform);

	/* Tear down the DMA tasks before freeing their irqs */
	bcom_gen_bd_rx_release(psc_dma->capture.bcom_task);
	bcom_gen_bd_tx_release(psc_dma->playback.bcom_task);

	/* Release irqs */
	free_irq(psc_dma->irq, psc_dma);
	free_irq(psc_dma->capture.irq, &psc_dma->capture);
	free_irq(psc_dma->playback.irq, &psc_dma->playback);

	iounmap(psc_dma->psc_regs);
	kfree(psc_dma);
	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_destroy);
  461. MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
  462. MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver");
  463. MODULE_LICENSE("GPL");