davinci-pcm.c

/*
 * ALSA PCM interface for the TI DAVINCI processor
 *
 * Author: Vladimir Barinov, <vbarinov@embeddedalley.com>
 * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com>
 * added SRAM ping/pong (C) 2008 Troy Kisky <troy.kisky@boundarydevices.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <asm/dma.h>
#include <mach/sram.h>

#include "davinci-pcm.h"

#ifdef DEBUG
static void print_buf_info(int slot, char *name)
{
	struct edmacc_param p;
	if (slot < 0)
		return;
	edma_read_slot(slot, &p);
	printk(KERN_DEBUG "%s: 0x%x, opt=%x, src=%x, a_b_cnt=%x dst=%x\n",
			name, slot, p.opt, p.src, p.a_b_cnt, p.dst);
	printk(KERN_DEBUG "	src_dst_bidx=%x link_bcntrld=%x src_dst_cidx=%x ccnt=%x\n",
			p.src_dst_bidx, p.link_bcntrld, p.src_dst_cidx, p.ccnt);
}
#else
static void print_buf_info(int slot, char *name)
{
}
#endif

#define DAVINCI_PCM_FMTBITS (\
	SNDRV_PCM_FMTBIT_S8 |\
	SNDRV_PCM_FMTBIT_U8 |\
	SNDRV_PCM_FMTBIT_S16_LE |\
	SNDRV_PCM_FMTBIT_S16_BE |\
	SNDRV_PCM_FMTBIT_U16_LE |\
	SNDRV_PCM_FMTBIT_U16_BE |\
	SNDRV_PCM_FMTBIT_S24_LE |\
	SNDRV_PCM_FMTBIT_S24_BE |\
	SNDRV_PCM_FMTBIT_U24_LE |\
	SNDRV_PCM_FMTBIT_U24_BE |\
	SNDRV_PCM_FMTBIT_S32_LE |\
	SNDRV_PCM_FMTBIT_S32_BE |\
	SNDRV_PCM_FMTBIT_U32_LE |\
	SNDRV_PCM_FMTBIT_U32_BE)

static struct snd_pcm_hardware pcm_hardware_playback = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
		 SNDRV_PCM_INFO_BATCH),
	.formats = DAVINCI_PCM_FMTBITS,
	.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
		  SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
		  SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
		  SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
		  SNDRV_PCM_RATE_KNOT),
	.rate_min = 8000,
	.rate_max = 96000,
	.channels_min = 2,
	.channels_max = 384,
	.buffer_bytes_max = 128 * 1024,
	.period_bytes_min = 32,
	.period_bytes_max = 8 * 1024,
	.periods_min = 16,
	.periods_max = 255,
	.fifo_size = 0,
};

static struct snd_pcm_hardware pcm_hardware_capture = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE |
		 SNDRV_PCM_INFO_BATCH),
	.formats = DAVINCI_PCM_FMTBITS,
	.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
		  SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
		  SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
		  SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
		  SNDRV_PCM_RATE_KNOT),
	.rate_min = 8000,
	.rate_max = 96000,
	.channels_min = 2,
	.channels_max = 384,
	.buffer_bytes_max = 128 * 1024,
	.period_bytes_min = 32,
	.period_bytes_max = 8 * 1024,
	.periods_min = 16,
	.periods_max = 255,
	.fifo_size = 0,
};

/*
 * How ping/pong works....
 *
 * Playback:
 * ram_params - copies 2*ping_size from the start of SDRAM to iram,
 *	links to ram_link2
 * ram_link2 - copies the rest of SDRAM to iram in ping_size units,
 *	links to ram_link
 * ram_link - copies the entire SDRAM buffer to iram in ping_size units,
 *	links to self
 *
 * asp_params - same as asp_link[0]
 * asp_link[0] - copies from the lower half of iram to the asp port,
 *	links to asp_link[1], triggers the iram copy event on completion
 * asp_link[1] - copies from the upper half of iram to the asp port,
 *	links to asp_link[0], triggers the iram copy event on completion,
 *	and raises an interrupt, which is only needed to let the upper SOC
 *	levels update the position in the stream
 *
 * When playback is started:
 *	ram_params started
 *	asp_params started
 *
 * Capture:
 * ram_params - same as ram_link,
 *	links to ram_link
 * ram_link - same as playback,
 *	links to self
 *
 * asp_params - same as playback
 * asp_link[0] - same as playback
 * asp_link[1] - same as playback
 *
 * When capture is started:
 *	asp_params started
 */
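
/*
 * A minimal sketch (not compiled) of the link topology described above,
 * mirroring what request_ping_pong() below actually sets up.  The prtd
 * fields are the real ones; slot/channel numbers are whatever the EDMA
 * allocator hands back at runtime.
 */
#if 0
	/* asp ping/pong slots reload each other forever */
	edma_link(prtd->asp_link[0], prtd->asp_link[1]);
	edma_link(prtd->asp_link[1], prtd->asp_link[0]);
	/* ram copy slot reloads itself */
	edma_link(prtd->ram_link, prtd->ram_link);
	/*
	 * Each asp transfer completion chains (TCCHEN) to the ram copy
	 * channel, so finishing a ping or pong half refills that half of
	 * iram from SDRAM (playback) or drains it to SDRAM (capture).
	 */
	prtd->asp_params.opt |= TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f);
#endif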
struct davinci_runtime_data {
	spinlock_t lock;
	int period;		/* current DMA period */
	int asp_channel;	/* Master DMA channel */
	int asp_link[2];	/* asp parameter link channel, ping/pong */
	struct davinci_pcm_dma_params *params;	/* DMA params */
	int ram_channel;
	int ram_link;
	int ram_link2;
	struct edmacc_param asp_params;
	struct edmacc_param ram_params;
};

static void davinci_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;

	prtd->period++;
	if (unlikely(prtd->period >= runtime->periods))
		prtd->period = 0;
}

static void davinci_pcm_period_reset(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	prtd->period = 0;
}

/*
 * Not used with ping/pong
 */
static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int period_size;
	unsigned int dma_offset;
	dma_addr_t dma_pos;
	dma_addr_t src, dst;
	unsigned short src_bidx, dst_bidx;
	unsigned short src_cidx, dst_cidx;
	unsigned int data_type;
	unsigned short acnt;
	unsigned int count;
	unsigned int fifo_level;

	period_size = snd_pcm_lib_period_bytes(substream);
	dma_offset = prtd->period * period_size;
	dma_pos = runtime->dma_addr + dma_offset;
	fifo_level = prtd->params->fifo_level;

	pr_debug("davinci_pcm: audio_set_dma_params_play channel = %d "
		"dma_ptr = %x period_size=%x\n", prtd->asp_link[0], dma_pos,
		period_size);

	data_type = prtd->params->data_type;
	count = period_size / data_type;
	if (fifo_level)
		count /= fifo_level;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		src = dma_pos;
		dst = prtd->params->dma_addr;
		src_bidx = data_type;
		dst_bidx = 0;
		src_cidx = data_type * fifo_level;
		dst_cidx = 0;
	} else {
		src = prtd->params->dma_addr;
		dst = dma_pos;
		src_bidx = 0;
		dst_bidx = data_type;
		src_cidx = 0;
		dst_cidx = data_type * fifo_level;
	}

	acnt = prtd->params->acnt;
	edma_set_src(prtd->asp_link[0], src, INCR, W8BIT);
	edma_set_dest(prtd->asp_link[0], dst, INCR, W8BIT);

	edma_set_src_index(prtd->asp_link[0], src_bidx, src_cidx);
	edma_set_dest_index(prtd->asp_link[0], dst_bidx, dst_cidx);

	if (!fifo_level)
		edma_set_transfer_params(prtd->asp_link[0], acnt, count, 1, 0,
							ASYNC);
	else
		edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level,
							count, fifo_level,
							ABSYNC);
}

static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
{
	struct snd_pcm_substream *substream = data;
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	print_buf_info(prtd->ram_channel, "i ram_channel");
	pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status);

	if (unlikely(ch_status != DMA_COMPLETE))
		return;

	if (snd_pcm_running(substream)) {
		spin_lock(&prtd->lock);
		if (prtd->ram_channel < 0) {
			/* No ping/pong, must fix up link dma data */
			davinci_pcm_enqueue_dma(substream);
		}
		davinci_pcm_period_elapsed(substream);
		spin_unlock(&prtd->lock);
		snd_pcm_period_elapsed(substream);
	}
}

static int allocate_sram(struct snd_pcm_substream *substream, unsigned size,
		struct snd_pcm_hardware *ppcm)
{
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	struct snd_dma_buffer *iram_dma = NULL;
	dma_addr_t iram_phys = 0;
	void *iram_virt = NULL;

	if (buf->private_data || !size)
		return 0;

	ppcm->period_bytes_max = size;
	iram_virt = sram_alloc(size, &iram_phys);
	if (!iram_virt)
		goto exit1;
	iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
	if (!iram_dma)
		goto exit2;
	iram_dma->area = iram_virt;
	iram_dma->addr = iram_phys;
	memset(iram_dma->area, 0, size);
	iram_dma->bytes = size;
	buf->private_data = iram_dma;
	return 0;
exit2:
	if (iram_virt)
		sram_free(iram_virt, size);
exit1:
	return -ENOMEM;
}

/*
 * Only used with ping/pong.
 * This is called after runtime->dma_addr, period_bytes and data_type are valid
 */
static int ping_pong_dma_setup(struct snd_pcm_substream *substream)
{
	unsigned short ram_src_cidx, ram_dst_cidx;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;
	struct snd_dma_buffer *iram_dma =
		(struct snd_dma_buffer *)substream->dma_buffer.private_data;
	struct davinci_pcm_dma_params *params = prtd->params;
	unsigned int data_type = params->data_type;
	unsigned int acnt = params->acnt;
	/* divide by 2 for ping/pong */
	unsigned int ping_size = snd_pcm_lib_period_bytes(substream) >> 1;
	unsigned int fifo_level = prtd->params->fifo_level;
	unsigned int count;

	if ((data_type == 0) || (data_type > 4)) {
		printk(KERN_ERR "%s: data_type=%i\n", __func__, data_type);
		return -EINVAL;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_addr_t asp_src_pong = iram_dma->addr + ping_size;
		ram_src_cidx = ping_size;
		ram_dst_cidx = -ping_size;
		edma_set_src(prtd->asp_link[1], asp_src_pong, INCR, W8BIT);

		edma_set_src_index(prtd->asp_link[0], data_type,
				data_type * fifo_level);
		edma_set_src_index(prtd->asp_link[1], data_type,
				data_type * fifo_level);

		edma_set_src(prtd->ram_link, runtime->dma_addr, INCR, W32BIT);
	} else {
		dma_addr_t asp_dst_pong = iram_dma->addr + ping_size;
		ram_src_cidx = -ping_size;
		ram_dst_cidx = ping_size;
		edma_set_dest(prtd->asp_link[1], asp_dst_pong, INCR, W8BIT);

		edma_set_dest_index(prtd->asp_link[0], data_type,
				data_type * fifo_level);
		edma_set_dest_index(prtd->asp_link[1], data_type,
				data_type * fifo_level);

		edma_set_dest(prtd->ram_link, runtime->dma_addr, INCR, W32BIT);
	}

	if (!fifo_level) {
		count = ping_size / data_type;
		edma_set_transfer_params(prtd->asp_link[0], acnt, count,
				1, 0, ASYNC);
		edma_set_transfer_params(prtd->asp_link[1], acnt, count,
				1, 0, ASYNC);
	} else {
		count = ping_size / (data_type * fifo_level);
		edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level,
				count, fifo_level, ABSYNC);
		edma_set_transfer_params(prtd->asp_link[1], acnt, fifo_level,
				count, fifo_level, ABSYNC);
	}

	edma_set_src_index(prtd->ram_link, ping_size, ram_src_cidx);
	edma_set_dest_index(prtd->ram_link, ping_size, ram_dst_cidx);
	edma_set_transfer_params(prtd->ram_link, ping_size, 2,
			runtime->periods, 2, ASYNC);

	/* init master params */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	edma_read_slot(prtd->ram_link, &prtd->ram_params);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		struct edmacc_param p_ram;
		/* Copy entire iram buffer before playback started */
		prtd->ram_params.a_b_cnt = (1 << 16) | (ping_size << 1);
		/* 0 dst_bidx */
		prtd->ram_params.src_dst_bidx = (ping_size << 1);
		/* 0 dst_cidx */
		prtd->ram_params.src_dst_cidx = (ping_size << 1);
		prtd->ram_params.ccnt = 1;

		/* Skip 1st period */
		edma_read_slot(prtd->ram_link, &p_ram);
		p_ram.src += (ping_size << 1);
		p_ram.ccnt -= 1;
		edma_write_slot(prtd->ram_link2, &p_ram);

		/*
		 * When 1st started, ram -> iram dma channel will fill the
		 * entire iram.  Then, whenever a ping/pong asp buffer finishes,
		 * 1/2 iram will be filled.
		 */
		prtd->ram_params.link_bcntrld =
			EDMA_CHAN_SLOT(prtd->ram_link2) << 5;
	}
	return 0;
}

/* 1 asp tx or rx channel using 2 parameter channels
 * 1 ram to/from iram channel using 1 parameter channel
 *
 * Playback
 * ram copy channel kicks off first,
 * 1st ram copy of entire iram buffer completion kicks off asp channel
 * asp tcc always kicks off ram copy of 1/2 iram buffer
 *
 * Record
 * asp channel starts, tcc kicks off ram copy
 */
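
/*
 * Illustrative data flow for the ping/pong case, sketched from the
 * description above (playback direction shown; capture runs the same
 * path in reverse):
 *
 *   SDRAM audio buffer --ram_channel/ram_link--> iram (ping | pong halves)
 *   iram ping/pong half --asp_link[0]/asp_link[1]--> asp port fifo
 *
 * Each completed asp half-transfer chains back to the ram channel so the
 * just-consumed half of iram is refilled from SDRAM.
 */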

static int request_ping_pong(struct snd_pcm_substream *substream,
		struct davinci_runtime_data *prtd,
		struct snd_dma_buffer *iram_dma)
{
	dma_addr_t asp_src_ping;
	dma_addr_t asp_dst_ping;
	int ret;
	struct davinci_pcm_dma_params *params = prtd->params;

	/* Request ram master channel */
	ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY,
				  davinci_pcm_dma_irq, substream,
				  prtd->params->ram_chan_q);
	if (ret < 0)
		goto exit1;

	/* Request ram link channel */
	ret = prtd->ram_link = edma_alloc_slot(
			EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit2;

	ret = prtd->asp_link[1] = edma_alloc_slot(
			EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit3;

	prtd->ram_link2 = -1;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = prtd->ram_link2 = edma_alloc_slot(
			EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
		if (ret < 0)
			goto exit4;
	}
	/* circle ping-pong buffers */
	edma_link(prtd->asp_link[0], prtd->asp_link[1]);
	edma_link(prtd->asp_link[1], prtd->asp_link[0]);
	/* circle ram buffers */
	edma_link(prtd->ram_link, prtd->ram_link);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		asp_src_ping = iram_dma->addr;
		asp_dst_ping = params->dma_addr;	/* fifo */
	} else {
		asp_src_ping = params->dma_addr;	/* fifo */
		asp_dst_ping = iram_dma->addr;
	}
	/* ping */
	edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT);
	edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT);
	edma_set_src_index(prtd->asp_link[0], 0, 0);
	edma_set_dest_index(prtd->asp_link[0], 0, 0);

	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN);
	prtd->asp_params.opt |= TCCHEN |
		EDMA_TCC(prtd->ram_channel & 0x3f);
	edma_write_slot(prtd->asp_link[0], &prtd->asp_params);

	/* pong */
	edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT);
	edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT);
	edma_set_src_index(prtd->asp_link[1], 0, 0);
	edma_set_dest_index(prtd->asp_link[1], 0, 0);

	edma_read_slot(prtd->asp_link[1], &prtd->asp_params);
	prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f));
	/* interrupt after every pong completion */
	prtd->asp_params.opt |= TCINTEN | TCCHEN |
		EDMA_TCC(prtd->ram_channel & 0x3f);
	edma_write_slot(prtd->asp_link[1], &prtd->asp_params);

	/* ram */
	edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
	edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
	pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u,"
		"for asp:%u %u %u\n", __func__,
		prtd->ram_channel, prtd->ram_link, prtd->ram_link2,
		prtd->asp_channel, prtd->asp_link[0],
		prtd->asp_link[1]);
	return 0;
exit4:
	edma_free_channel(prtd->asp_link[1]);
	prtd->asp_link[1] = -1;
exit3:
	edma_free_channel(prtd->ram_link);
	prtd->ram_link = -1;
exit2:
	edma_free_channel(prtd->ram_channel);
	prtd->ram_channel = -1;
exit1:
	return ret;
}

static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
{
	struct snd_dma_buffer *iram_dma;
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct davinci_pcm_dma_params *params = prtd->params;
	int ret;

	if (!params)
		return -ENODEV;

	/* Request asp master DMA channel */
	ret = prtd->asp_channel = edma_alloc_channel(params->channel,
			davinci_pcm_dma_irq, substream,
			prtd->params->asp_chan_q);
	if (ret < 0)
		goto exit1;

	/* Request asp link channels */
	ret = prtd->asp_link[0] = edma_alloc_slot(
			EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit2;

	iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data;
	if (iram_dma) {
		if (request_ping_pong(substream, prtd, iram_dma) == 0)
			return 0;
		printk(KERN_WARNING "%s: dma channel allocation failed,"
				"not using sram\n", __func__);
	}

	/* Issue transfer completion IRQ when the channel completes a
	 * transfer, then always reload from the same slot (by a kind
	 * of loopback link).  The completion IRQ handler will update
	 * the reload slot with a new buffer.
	 *
	 * REVISIT save p_ram here after setting up everything except
	 * the buffer and its length (ccnt) ... use it as a template
	 * so davinci_pcm_enqueue_dma() takes less time in IRQ.
	 */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	prtd->asp_params.opt |= TCINTEN |
		EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel));
	prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5;
	edma_write_slot(prtd->asp_link[0], &prtd->asp_params);
	return 0;
exit2:
	edma_free_channel(prtd->asp_channel);
	prtd->asp_channel = -1;
exit1:
	return ret;
}

static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		edma_start(prtd->asp_channel);
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    prtd->ram_channel >= 0) {
			/* copy 1st iram buffer */
			edma_start(prtd->ram_channel);
		}
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		edma_resume(prtd->asp_channel);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		edma_pause(prtd->asp_channel);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	davinci_pcm_period_reset(substream);
	if (prtd->ram_channel >= 0) {
		int ret = ping_pong_dma_setup(substream);
		if (ret < 0)
			return ret;

		edma_write_slot(prtd->ram_channel, &prtd->ram_params);
		edma_write_slot(prtd->asp_channel, &prtd->asp_params);

		print_buf_info(prtd->ram_channel, "ram_channel");
		print_buf_info(prtd->ram_link, "ram_link");
		print_buf_info(prtd->ram_link2, "ram_link2");
		print_buf_info(prtd->asp_channel, "asp_channel");
		print_buf_info(prtd->asp_link[0], "asp_link[0]");
		print_buf_info(prtd->asp_link[1], "asp_link[1]");

		/*
		 * There is a phase offset of 2 periods between the position
		 * used by dma setup and the position reported in the pointer
		 * function.
		 *
		 * The phase offset, when not using ping-pong buffers, is due to
		 * the two consecutive calls to davinci_pcm_enqueue_dma() below.
		 *
		 * Whereas here, with ping-pong buffers, the phase is due to
		 * there being an entire buffer transfer complete before the
		 * first dma completion event triggers davinci_pcm_dma_irq().
		 */
		davinci_pcm_period_elapsed(substream);
		davinci_pcm_period_elapsed(substream);

		return 0;
	}
	davinci_pcm_enqueue_dma(substream);
	davinci_pcm_period_elapsed(substream);

	/* Copy self-linked parameter RAM entry into master channel */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	edma_write_slot(prtd->asp_channel, &prtd->asp_params);
	davinci_pcm_enqueue_dma(substream);
	davinci_pcm_period_elapsed(substream);

	return 0;
}

static snd_pcm_uframes_t
davinci_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;
	unsigned int offset;
	int asp_count;
	unsigned int period_size = snd_pcm_lib_period_bytes(substream);

	/*
	 * There is a phase offset of 2 periods between the position used by dma
	 * setup and the position reported in the pointer function.  Either +2 in
	 * the dma setup or -2 here in the pointer function (with wrapping,
	 * both) accounts for this offset -- choose the latter since it makes
	 * the first-time setup clearer.
	 */
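	/*
	 * Worked example (values illustrative only): with runtime->periods
	 * == 16 and prtd->period == 1, asp_count = 1 - 2 = -1, which wraps
	 * to 15, so the reported position is 15 * period_size bytes, i.e.
	 * two periods behind the period the dma setup is working on.
	 */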
	spin_lock(&prtd->lock);
	asp_count = prtd->period - 2;
	spin_unlock(&prtd->lock);

	if (asp_count < 0)
		asp_count += runtime->periods;
	asp_count *= period_size;

	offset = bytes_to_frames(runtime, asp_count);
	if (offset >= runtime->buffer_size)
		offset = 0;

	return offset;
}

static int davinci_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd;
	struct snd_pcm_hardware *ppcm;
	int ret = 0;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct davinci_pcm_dma_params *pa;
	struct davinci_pcm_dma_params *params;

	pa = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	if (!pa)
		return -ENODEV;
	params = &pa[substream->stream];

	ppcm = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
			&pcm_hardware_playback : &pcm_hardware_capture;
	allocate_sram(substream, params->sram_size, ppcm);
	snd_soc_set_runtime_hwparams(substream, ppcm);
	/* ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(runtime,
						SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	prtd = kzalloc(sizeof(struct davinci_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	spin_lock_init(&prtd->lock);
	prtd->params = params;
	prtd->asp_channel = -1;
	prtd->asp_link[0] = prtd->asp_link[1] = -1;
	prtd->ram_channel = -1;
	prtd->ram_link = -1;
	prtd->ram_link2 = -1;

	runtime->private_data = prtd;

	ret = davinci_pcm_dma_request(substream);
	if (ret) {
		printk(KERN_ERR "davinci_pcm: Failed to get dma channels\n");
		kfree(prtd);
	}
	return ret;
}

static int davinci_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;

	if (prtd->ram_channel >= 0)
		edma_stop(prtd->ram_channel);
	if (prtd->asp_channel >= 0)
		edma_stop(prtd->asp_channel);
	if (prtd->asp_link[0] >= 0)
		edma_unlink(prtd->asp_link[0]);
	if (prtd->asp_link[1] >= 0)
		edma_unlink(prtd->asp_link[1]);
	if (prtd->ram_link >= 0)
		edma_unlink(prtd->ram_link);

	if (prtd->asp_link[0] >= 0)
		edma_free_slot(prtd->asp_link[0]);
	if (prtd->asp_link[1] >= 0)
		edma_free_slot(prtd->asp_link[1]);
	if (prtd->asp_channel >= 0)
		edma_free_channel(prtd->asp_channel);
	if (prtd->ram_link >= 0)
		edma_free_slot(prtd->ram_link);
	if (prtd->ram_link2 >= 0)
		edma_free_slot(prtd->ram_link2);
	if (prtd->ram_channel >= 0)
		edma_free_channel(prtd->ram_channel);

	kfree(prtd);

	return 0;
}

static int davinci_pcm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
}

static int davinci_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

static int davinci_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
				     runtime->dma_area,
				     runtime->dma_addr,
				     runtime->dma_bytes);
}

static struct snd_pcm_ops davinci_pcm_ops = {
	.open = davinci_pcm_open,
	.close = davinci_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = davinci_pcm_hw_params,
	.hw_free = davinci_pcm_hw_free,
	.prepare = davinci_pcm_prepare,
	.trigger = davinci_pcm_trigger,
	.pointer = davinci_pcm_pointer,
	.mmap = davinci_pcm_mmap,
};

static int davinci_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
		size_t size)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					   &buf->addr, GFP_KERNEL);

	pr_debug("davinci_pcm: preallocate_dma_buffer: area=%p, addr=%p, "
		 "size=%d\n", (void *) buf->area, (void *) buf->addr, size);

	if (!buf->area)
		return -ENOMEM;

	buf->bytes = size;
	return 0;
}

static void davinci_pcm_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	for (stream = 0; stream < 2; stream++) {
		struct snd_dma_buffer *iram_dma;
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_writecombine(pcm->card->dev, buf->bytes,
				      buf->area, buf->addr);
		buf->area = NULL;
		iram_dma = buf->private_data;
		if (iram_dma) {
			sram_free(iram_dma->area, iram_dma->bytes);
			kfree(iram_dma);
		}
	}
}

static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);

static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &davinci_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = davinci_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK,
			pcm_hardware_playback.buffer_bytes_max);
		if (ret)
			return ret;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = davinci_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE,
			pcm_hardware_capture.buffer_bytes_max);
		if (ret)
			return ret;
	}

	return 0;
}

static struct snd_soc_platform_driver davinci_soc_platform = {
	.ops = &davinci_pcm_ops,
	.pcm_new = davinci_pcm_new,
	.pcm_free = davinci_pcm_free,
};

int davinci_soc_platform_register(struct device *dev)
{
	return snd_soc_register_platform(dev, &davinci_soc_platform);
}
EXPORT_SYMBOL_GPL(davinci_soc_platform_register);

void davinci_soc_platform_unregister(struct device *dev)
{
	snd_soc_unregister_platform(dev);
}
EXPORT_SYMBOL_GPL(davinci_soc_platform_unregister);

MODULE_AUTHOR("Vladimir Barinov");
MODULE_DESCRIPTION("TI DAVINCI PCM DMA module");
MODULE_LICENSE("GPL");