
/*
 * File:         bf5xx-sport.c
 * Based on:
 * Author:       Roy Huang <roy.huang@analog.com>
 *
 * Created:      Tue Sep 21 10:52:42 CEST 2004
 * Description:  Blackfin SPORT driver
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/bug.h>
#include <asm/portmux.h>
#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>

#include "bf5xx-sport.h"

/* delay between frame sync pulse and first data bit in multichannel mode */
#define FRAME_DELAY (1 << 12)

struct sport_device *sport_handle;
EXPORT_SYMBOL(sport_handle);
/* note: multichannel support is configured in units of 8 channels;
 * tdm_count is the number of channels, NOT channels / 8 ! */
int sport_set_multichannel(struct sport_device *sport,
		int tdm_count, u32 mask, int packed)
{
	pr_debug("%s tdm_count=%d mask:0x%08x packed=%d\n", __func__,
			tdm_count, mask, packed);

	if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
		return -EBUSY;

	if (tdm_count & 0x7)
		return -EINVAL;

	if (tdm_count > 32)
		return -EINVAL; /* Only up to 32 channels are supported now */

	if (tdm_count) {
		sport->regs->mcmc1 = ((tdm_count >> 3) - 1) << 12;
		sport->regs->mcmc2 = FRAME_DELAY | MCMEN |
				(packed ? (MCDTXPE | MCDRXPE) : 0);
		sport->regs->mtcs0 = mask;
		sport->regs->mrcs0 = mask;
	} else {
		sport->regs->mcmc1 = 0;
		sport->regs->mcmc2 = 0;
		sport->regs->mtcs0 = 0;
		sport->regs->mrcs0 = 0;
	}

	/* Only the first 32 channels are used; clear the rest in both cases */
	sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0;
	sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0;

	SSYNC();

	return 0;
}
EXPORT_SYMBOL(sport_set_multichannel);
int sport_config_rx(struct sport_device *sport, unsigned int rcr1,
		unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv)
{
	if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
		return -EBUSY;

	sport->regs->rcr1 = rcr1;
	sport->regs->rcr2 = rcr2;
	sport->regs->rclkdiv = clkdiv;
	sport->regs->rfsdiv = fsdiv;

	SSYNC();

	return 0;
}
EXPORT_SYMBOL(sport_config_rx);

int sport_config_tx(struct sport_device *sport, unsigned int tcr1,
		unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv)
{
	if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
		return -EBUSY;

	sport->regs->tcr1 = tcr1;
	sport->regs->tcr2 = tcr2;
	sport->regs->tclkdiv = clkdiv;
	sport->regs->tfsdiv = fsdiv;

	SSYNC();

	return 0;
}
EXPORT_SYMBOL(sport_config_tx);
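
/*
 * Build a circular chain of "large descriptor mode" DMA descriptors, one
 * per audio fragment.  The controller follows next_desc_addr from fragment
 * to fragment forever; callers pass DI_EN in cfg so an interrupt is raised
 * each time a fragment completes.
 */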
static void setup_desc(struct dmasg *desc, void *buf, int fragcount,
		size_t fragsize, unsigned int cfg,
		unsigned int x_count, unsigned int ycount, size_t wdsize)
{
	int i;

	for (i = 0; i < fragcount; ++i) {
		desc[i].next_desc_addr = &(desc[i + 1]);
		desc[i].start_addr = (unsigned long)buf + i * fragsize;
		desc[i].cfg = cfg;
		desc[i].x_count = x_count;
		desc[i].x_modify = wdsize;
		desc[i].y_count = ycount;
		desc[i].y_modify = wdsize;
	}

	/* make circular */
	desc[fragcount - 1].next_desc_addr = desc;

	pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p, "
		"next1=%p\nx_count=%x, y_count=%x, addr=0x%lx, cfg=0x%x\n",
		desc, desc[0].next_desc_addr,
		desc + 1, desc[1].next_desc_addr,
		desc[0].x_count, desc[0].y_count,
		desc[0].start_addr, desc[0].cfg);
}
static int sport_start(struct sport_device *sport)
{
	enable_dma(sport->dma_rx_chan);
	enable_dma(sport->dma_tx_chan);
	sport->regs->rcr1 |= RSPEN;
	sport->regs->tcr1 |= TSPEN;
	SSYNC();

	return 0;
}

static int sport_stop(struct sport_device *sport)
{
	sport->regs->tcr1 &= ~TSPEN;
	sport->regs->rcr1 &= ~RSPEN;
	SSYNC();

	disable_dma(sport->dma_rx_chan);
	disable_dma(sport->dma_tx_chan);
	return 0;
}
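
/*
 * Once the SPORT is started, both DMA channels run continuously: a
 * direction with no active stream circulates in a two-descriptor "dummy"
 * ring instead of a real fragment ring.  To hook the dummy ring into a
 * running channel, the descriptor the controller will fetch next is
 * temporarily rewritten (its fragment shortened and its next pointer
 * aimed at the dummy ring), we busy-wait until the controller has fetched
 * a dummy descriptor, and then the modified descriptor is restored.
 */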
static inline int sport_hook_rx_dummy(struct sport_device *sport)
{
	struct dmasg *desc, temp_desc;
	unsigned long flags;

	BUG_ON(sport->dummy_rx_desc == NULL);
	BUG_ON(sport->curr_rx_desc == sport->dummy_rx_desc);

	/* The dummy descriptor ring may have been damaged earlier; relink it */
	sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc + 1;

	local_irq_save(flags);
	desc = get_dma_next_desc_ptr(sport->dma_rx_chan);
	/* Copy the descriptor which will be damaged to a backup */
	temp_desc = *desc;
	desc->x_count = sport->dummy_count / 2;
	desc->y_count = 0;
	desc->next_desc_addr = sport->dummy_rx_desc;
	local_irq_restore(flags);
	/* Wait until the dummy buffer descriptor has been hooked */
	while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
			sizeof(struct dmasg)) != sport->dummy_rx_desc)
		continue;
	sport->curr_rx_desc = sport->dummy_rx_desc;
	/* Restore the damaged descriptor */
	*desc = temp_desc;

	return 0;
}
static inline int sport_rx_dma_start(struct sport_device *sport, int dummy)
{
	if (dummy) {
		sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc;
		sport->curr_rx_desc = sport->dummy_rx_desc;
	} else
		sport->curr_rx_desc = sport->dma_rx_desc;

	set_dma_next_desc_addr(sport->dma_rx_chan, sport->curr_rx_desc);
	set_dma_x_count(sport->dma_rx_chan, 0);
	set_dma_x_modify(sport->dma_rx_chan, 0);
	set_dma_config(sport->dma_rx_chan,
			(DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32 | WNR));
	set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr);
	SSYNC();

	return 0;
}

static inline int sport_tx_dma_start(struct sport_device *sport, int dummy)
{
	if (dummy) {
		sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc;
		sport->curr_tx_desc = sport->dummy_tx_desc;
	} else
		sport->curr_tx_desc = sport->dma_tx_desc;

	set_dma_next_desc_addr(sport->dma_tx_chan, sport->curr_tx_desc);
	set_dma_x_count(sport->dma_tx_chan, 0);
	set_dma_x_modify(sport->dma_tx_chan, 0);
	set_dma_config(sport->dma_tx_chan,
			(DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32));
	set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr);
	SSYNC();

	return 0;
}
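
/*
 * Starting one direction while the other is already running means the
 * SPORT and both DMA channels are live: the newly started direction is
 * unhooked from its dummy ring by pointing the dummy descriptor at the
 * real fragment ring.  If nothing is running yet, both DMA channels are
 * programmed (the idle one on its dummy ring) and the SPORT is enabled.
 */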
int sport_rx_start(struct sport_device *sport)
{
	unsigned long flags;

	pr_debug("%s enter\n", __func__);

	if (sport->rx_run)
		return -EBUSY;

	if (sport->tx_run) {
		/* tx is running, rx is not running */
		BUG_ON(sport->dma_rx_desc == NULL);
		BUG_ON(sport->curr_rx_desc != sport->dummy_rx_desc);
		local_irq_save(flags);
		while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
				sizeof(struct dmasg)) != sport->dummy_rx_desc)
			continue;
		sport->dummy_rx_desc->next_desc_addr = sport->dma_rx_desc;
		local_irq_restore(flags);
		sport->curr_rx_desc = sport->dma_rx_desc;
	} else {
		sport_tx_dma_start(sport, 1);
		sport_rx_dma_start(sport, 0);
		sport_start(sport);
	}

	sport->rx_run = 1;

	return 0;
}
EXPORT_SYMBOL(sport_rx_start);

int sport_rx_stop(struct sport_device *sport)
{
	pr_debug("%s enter\n", __func__);

	if (!sport->rx_run)
		return 0;

	if (sport->tx_run) {
		/* TX dma is still running, hook the dummy buffer */
		sport_hook_rx_dummy(sport);
	} else {
		/* Both rx and tx dma will be stopped */
		sport_stop(sport);
		sport->curr_rx_desc = NULL;
		sport->curr_tx_desc = NULL;
	}

	sport->rx_run = 0;

	return 0;
}
EXPORT_SYMBOL(sport_rx_stop);
static inline int sport_hook_tx_dummy(struct sport_device *sport)
{
	struct dmasg *desc, temp_desc;
	unsigned long flags;

	BUG_ON(sport->dummy_tx_desc == NULL);
	BUG_ON(sport->curr_tx_desc == sport->dummy_tx_desc);

	sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc + 1;

	/* Shorten the time spent on the last normal descriptor */
	local_irq_save(flags);
	desc = get_dma_next_desc_ptr(sport->dma_tx_chan);
	/* Store the descriptor which will be damaged */
	temp_desc = *desc;
	desc->x_count = sport->dummy_count / 2;
	desc->y_count = 0;
	desc->next_desc_addr = sport->dummy_tx_desc;
	local_irq_restore(flags);
	/* Wait until the dummy buffer descriptor has been hooked */
	while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
			sizeof(struct dmasg)) != sport->dummy_tx_desc)
		continue;
	sport->curr_tx_desc = sport->dummy_tx_desc;
	/* Restore the damaged descriptor */
	*desc = temp_desc;

	return 0;
}
int sport_tx_start(struct sport_device *sport)
{
	unsigned long flags;

	pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__,
			sport->tx_run, sport->rx_run);

	if (sport->tx_run)
		return -EBUSY;

	if (sport->rx_run) {
		BUG_ON(sport->dma_tx_desc == NULL);
		BUG_ON(sport->curr_tx_desc != sport->dummy_tx_desc);
		/* Hook the normal buffer descriptor */
		local_irq_save(flags);
		while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
				sizeof(struct dmasg)) != sport->dummy_tx_desc)
			continue;
		sport->dummy_tx_desc->next_desc_addr = sport->dma_tx_desc;
		local_irq_restore(flags);
		sport->curr_tx_desc = sport->dma_tx_desc;
	} else {
		sport_tx_dma_start(sport, 0);
		/* Let rx dma run the dummy buffer */
		sport_rx_dma_start(sport, 1);
		sport_start(sport);
	}

	sport->tx_run = 1;

	return 0;
}
EXPORT_SYMBOL(sport_tx_start);

int sport_tx_stop(struct sport_device *sport)
{
	if (!sport->tx_run)
		return 0;

	if (sport->rx_run) {
		/* RX dma is still running, hook the dummy buffer */
		sport_hook_tx_dummy(sport);
	} else {
		/* Both rx and tx dma will be stopped */
		sport_stop(sport);
		sport->curr_rx_desc = NULL;
		sport->curr_tx_desc = NULL;
	}

	sport->tx_run = 0;

	return 0;
}
EXPORT_SYMBOL(sport_tx_stop);
static inline int compute_wdsize(size_t wdsize)
{
	switch (wdsize) {
	case 1:
		return WDSIZE_8;
	case 2:
		return WDSIZE_16;
	case 4:
	default:
		return WDSIZE_32;
	}
}
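
/*
 * Each fragment normally maps to a 1D transfer of x_count = fragsize /
 * wdsize words.  The DMA x_count register is 16 bits wide, so a fragment
 * of 64K words or more is folded into a 2D transfer instead: the word
 * count is factored as x_count * y_count, where x_count is the largest
 * power of two (up to 65536) dividing the count and y_count stays below
 * 0x10000.  For example, 0x18000 words become x_count = 0x8000 with
 * y_count = 3.  A count that cannot be factored this way is rejected
 * with -EINVAL.
 */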
int sport_config_rx_dma(struct sport_device *sport, void *buf,
		int fragcount, size_t fragsize)
{
	unsigned int x_count;
	unsigned int y_count;
	unsigned int cfg;
	dma_addr_t addr;

	pr_debug("%s buf:%p, frag:%d, fragsize:0x%lx\n", __func__,
			buf, fragcount, fragsize);

	x_count = fragsize / sport->wdsize;
	y_count = 0;

	/* For fragments of 64K words or more, use 2D DMA: factor the word
	 * count into two factors that are each below 64K. */
	if (x_count >= 0x10000) {
		int i, count = x_count;

		for (i = 16; i > 0; i--) {
			x_count = 1 << i;
			if ((count & (x_count - 1)) == 0) {
				y_count = count >> i;
				if (y_count < 0x10000)
					break;
			}
		}
		if (i == 0)
			return -EINVAL;
	}
	pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__,
			x_count, y_count);

	if (sport->dma_rx_desc)
		dma_free_coherent(NULL, sport->rx_desc_bytes,
				sport->dma_rx_desc, 0);

	/* Allocate a new descriptor ring as the current one. */
	sport->dma_rx_desc = dma_alloc_coherent(NULL,
			fragcount * sizeof(struct dmasg), &addr, 0);
	sport->rx_desc_bytes = fragcount * sizeof(struct dmasg);

	if (!sport->dma_rx_desc) {
		pr_err("Failed to allocate memory for rx desc\n");
		return -ENOMEM;
	}

	sport->rx_buf = buf;
	sport->rx_fragsize = fragsize;
	sport->rx_frags = fragcount;

	cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR |
			(DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

	if (y_count != 0)
		cfg |= DMA2D;

	setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize,
			cfg|DMAEN, x_count, y_count, sport->wdsize);

	return 0;
}
EXPORT_SYMBOL(sport_config_rx_dma);
int sport_config_tx_dma(struct sport_device *sport, void *buf,
		int fragcount, size_t fragsize)
{
	unsigned int x_count;
	unsigned int y_count;
	unsigned int cfg;
	dma_addr_t addr;

	pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%lx\n",
			__func__, buf, fragcount, fragsize);

	x_count = fragsize / sport->wdsize;
	y_count = 0;

	/* For fragments of 64K words or more, use 2D DMA: factor the word
	 * count into two factors that are each below 64K. */
	if (x_count >= 0x10000) {
		int i, count = x_count;

		for (i = 16; i > 0; i--) {
			x_count = 1 << i;
			if ((count & (x_count - 1)) == 0) {
				y_count = count >> i;
				if (y_count < 0x10000)
					break;
			}
		}
		if (i == 0)
			return -EINVAL;
	}
	pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__,
			x_count, y_count);

	if (sport->dma_tx_desc)
		dma_free_coherent(NULL, sport->tx_desc_bytes,
				sport->dma_tx_desc, 0);

	sport->dma_tx_desc = dma_alloc_coherent(NULL,
			fragcount * sizeof(struct dmasg), &addr, 0);
	sport->tx_desc_bytes = fragcount * sizeof(struct dmasg);
	if (!sport->dma_tx_desc) {
		pr_err("Failed to allocate memory for tx desc\n");
		return -ENOMEM;
	}

	sport->tx_buf = buf;
	sport->tx_fragsize = fragsize;
	sport->tx_frags = fragcount;

	cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) |
			(DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

	if (y_count != 0)
		cfg |= DMA2D;

	setup_desc(sport->dma_tx_desc, buf, fragcount, fragsize,
			cfg|DMAEN, x_count, y_count, sport->wdsize);

	return 0;
}
EXPORT_SYMBOL(sport_config_tx_dma);
/* Set up a dummy dma descriptor ring, which does not generate interrupts;
 * it keeps the channel busy while no real audio buffer is hooked in */
static int sport_config_rx_dummy(struct sport_device *sport)
{
	struct dmasg *desc;
	unsigned config;

	pr_debug("%s entered\n", __func__);
	if (L1_DATA_A_LENGTH)
		desc = l1_data_sram_zalloc(2 * sizeof(*desc));
	else {
		dma_addr_t addr;
		desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
		if (desc)
			memset(desc, 0, 2 * sizeof(*desc));
	}
	if (desc == NULL) {
		pr_err("Failed to allocate memory for dummy rx desc\n");
		return -ENOMEM;
	}
	sport->dummy_rx_desc = desc;
	desc->start_addr = (unsigned long)sport->dummy_buf;
	config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize)
			| WNR | DMAEN;
	desc->cfg = config;
	desc->x_count = sport->dummy_count / sport->wdsize;
	desc->x_modify = sport->wdsize;
	desc->y_count = 0;
	desc->y_modify = 0;
	/* The second descriptor is a copy of the first; link the pair
	 * into a circular ring */
	memcpy(desc + 1, desc, sizeof(*desc));
	desc->next_desc_addr = desc + 1;
	desc[1].next_desc_addr = desc;
	return 0;
}

static int sport_config_tx_dummy(struct sport_device *sport)
{
	struct dmasg *desc;
	unsigned int config;

	pr_debug("%s entered\n", __func__);

	if (L1_DATA_A_LENGTH)
		desc = l1_data_sram_zalloc(2 * sizeof(*desc));
	else {
		dma_addr_t addr;
		desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
		if (desc)
			memset(desc, 0, 2 * sizeof(*desc));
	}
	if (!desc) {
		pr_err("Failed to allocate memory for dummy tx desc\n");
		return -ENOMEM;
	}
	sport->dummy_tx_desc = desc;
	desc->start_addr = (unsigned long)sport->dummy_buf +
			sport->dummy_count;
	config = DMAFLOW_LARGE | NDSIZE_9 |
			compute_wdsize(sport->wdsize) | DMAEN;
	desc->cfg = config;
	desc->x_count = sport->dummy_count / sport->wdsize;
	desc->x_modify = sport->wdsize;
	desc->y_count = 0;
	desc->y_modify = 0;
	memcpy(desc + 1, desc, sizeof(*desc));
	desc->next_desc_addr = desc + 1;
	desc[1].next_desc_addr = desc;
	return 0;
}
unsigned long sport_curr_offset_rx(struct sport_device *sport)
{
	unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan);

	return (unsigned char *)curr - sport->rx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_rx);

unsigned long sport_curr_offset_tx(struct sport_device *sport)
{
	unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan);

	return (unsigned char *)curr - sport->tx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_tx);

void sport_incfrag(struct sport_device *sport, int *frag, int tx)
{
	++(*frag);
	if (tx == 1 && *frag == sport->tx_frags)
		*frag = 0;

	if (tx == 0 && *frag == sport->rx_frags)
		*frag = 0;
}
EXPORT_SYMBOL(sport_incfrag);

void sport_decfrag(struct sport_device *sport, int *frag, int tx)
{
	--(*frag);
	if (tx == 1 && *frag == 0)
		*frag = sport->tx_frags;

	if (tx == 0 && *frag == 0)
		*frag = sport->rx_frags;
}
EXPORT_SYMBOL(sport_decfrag);
static int sport_check_status(struct sport_device *sport,
		unsigned int *sport_stat,
		unsigned int *rx_stat,
		unsigned int *tx_stat)
{
	int status = 0;

	if (sport_stat) {
		SSYNC();
		status = sport->regs->stat;
		if (status & (TOVF|TUVF|ROVF|RUVF))
			sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF));
		SSYNC();
		*sport_stat = status;
	}

	if (rx_stat) {
		SSYNC();
		status = get_dma_curr_irqstat(sport->dma_rx_chan);
		if (status & (DMA_DONE|DMA_ERR))
			clear_dma_irqstat(sport->dma_rx_chan);
		SSYNC();
		*rx_stat = status;
	}

	if (tx_stat) {
		SSYNC();
		status = get_dma_curr_irqstat(sport->dma_tx_chan);
		if (status & (DMA_DONE|DMA_ERR))
			clear_dma_irqstat(sport->dma_tx_chan);
		SSYNC();
		*tx_stat = status;
	}

	return 0;
}
int sport_dump_stat(struct sport_device *sport, char *buf, size_t len)
{
	int ret;

	ret = snprintf(buf, len,
			"sts: 0x%04x\n"
			"rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n",
			sport->regs->stat,
			sport->dma_rx_chan,
			get_dma_curr_irqstat(sport->dma_rx_chan),
			sport->dma_tx_chan,
			get_dma_curr_irqstat(sport->dma_tx_chan));
	buf += ret;
	len -= ret;

	ret += snprintf(buf, len,
			"curr_rx_desc:0x%p, curr_tx_desc:0x%p\n"
			"dma_rx_desc:0x%p, dma_tx_desc:0x%p\n"
			"dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n",
			sport->curr_rx_desc, sport->curr_tx_desc,
			sport->dma_rx_desc, sport->dma_tx_desc,
			sport->dummy_rx_desc, sport->dummy_tx_desc);

	return ret;
}
static irqreturn_t rx_handler(int irq, void *dev_id)
{
	unsigned int rx_stat;
	struct sport_device *sport = dev_id;

	pr_debug("%s enter\n", __func__);
	sport_check_status(sport, NULL, &rx_stat, NULL);
	if (!(rx_stat & DMA_DONE))
		pr_err("rx dma is already stopped\n");

	if (sport->rx_callback) {
		sport->rx_callback(sport->rx_data);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t tx_handler(int irq, void *dev_id)
{
	unsigned int tx_stat;
	struct sport_device *sport = dev_id;

	pr_debug("%s enter\n", __func__);
	sport_check_status(sport, NULL, NULL, &tx_stat);
	if (!(tx_stat & DMA_DONE)) {
		pr_err("tx dma is already stopped\n");
		return IRQ_HANDLED;
	}
	if (sport->tx_callback) {
		sport->tx_callback(sport->tx_data);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static irqreturn_t err_handler(int irq, void *dev_id)
{
	unsigned int status = 0;
	struct sport_device *sport = dev_id;

	pr_debug("%s\n", __func__);
	if (sport_check_status(sport, &status, NULL, NULL)) {
		pr_err("error checking status\n");
		return IRQ_NONE;
	}

	if (status & (TOVF|TUVF|ROVF|RUVF)) {
		pr_info("sport status error:%s%s%s%s\n",
				status & TOVF ? " TOVF" : "",
				status & TUVF ? " TUVF" : "",
				status & ROVF ? " ROVF" : "",
				status & RUVF ? " RUVF" : "");
		/* On a TX error, restart the TX channel on whichever ring
		 * it should be running; likewise for RX */
		if (status & TOVF || status & TUVF) {
			disable_dma(sport->dma_tx_chan);
			if (sport->tx_run)
				sport_tx_dma_start(sport, 0);
			else
				sport_tx_dma_start(sport, 1);
			enable_dma(sport->dma_tx_chan);
		} else {
			disable_dma(sport->dma_rx_chan);
			if (sport->rx_run)
				sport_rx_dma_start(sport, 0);
			else
				sport_rx_dma_start(sport, 1);
			enable_dma(sport->dma_rx_chan);
		}
	}
	status = sport->regs->stat;
	if (status & (TOVF|TUVF|ROVF|RUVF))
		sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF));
	SSYNC();

	if (sport->err_callback)
		sport->err_callback(sport->err_data);

	return IRQ_HANDLED;
}
int sport_set_rx_callback(struct sport_device *sport,
		void (*rx_callback)(void *), void *rx_data)
{
	BUG_ON(rx_callback == NULL);
	sport->rx_callback = rx_callback;
	sport->rx_data = rx_data;

	return 0;
}
EXPORT_SYMBOL(sport_set_rx_callback);

int sport_set_tx_callback(struct sport_device *sport,
		void (*tx_callback)(void *), void *tx_data)
{
	BUG_ON(tx_callback == NULL);
	sport->tx_callback = tx_callback;
	sport->tx_data = tx_data;

	return 0;
}
EXPORT_SYMBOL(sport_set_tx_callback);

int sport_set_err_callback(struct sport_device *sport,
		void (*err_callback)(void *), void *err_data)
{
	BUG_ON(err_callback == NULL);
	sport->err_callback = err_callback;
	sport->err_data = err_data;

	return 0;
}
EXPORT_SYMBOL(sport_set_err_callback);
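
/*
 * Allocate and set up a sport_device.  wdsize is the DMA word size in
 * bytes; dummy_count is the size in bytes of each half of the dummy
 * buffer (the RX dummy ring uses the first half and the TX dummy ring
 * the second, which is why dummy_count * 2 bytes are allocated).
 */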
struct sport_device *sport_init(struct sport_param *param, unsigned wdsize,
		unsigned dummy_count, void *private_data)
{
	int ret;
	struct sport_device *sport;

	pr_debug("%s enter\n", __func__);
	BUG_ON(param == NULL);
	BUG_ON(wdsize == 0 || dummy_count == 0);
	sport = kzalloc(sizeof(struct sport_device), GFP_KERNEL);
	if (!sport) {
		pr_err("Failed to allocate for sport device\n");
		return NULL;
	}

	sport->dma_rx_chan = param->dma_rx_chan;
	sport->dma_tx_chan = param->dma_tx_chan;
	sport->err_irq = param->err_irq;
	sport->regs = param->regs;
	sport->private_data = private_data;

	if (request_dma(sport->dma_rx_chan, "SPORT RX Data") == -EBUSY) {
		pr_err("Failed to request RX dma %d\n", sport->dma_rx_chan);
		goto __init_err1;
	}
	if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) {
		pr_err("Failed to request RX irq %d\n", sport->dma_rx_chan);
		goto __init_err2;
	}

	if (request_dma(sport->dma_tx_chan, "SPORT TX Data") == -EBUSY) {
		pr_err("Failed to request TX dma %d\n", sport->dma_tx_chan);
		goto __init_err2;
	}

	if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) {
		pr_err("Failed to request TX irq %d\n", sport->dma_tx_chan);
		goto __init_err3;
	}

	if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err",
			sport) < 0) {
		pr_err("Failed to request err irq:%d\n", sport->err_irq);
		goto __init_err3;
	}

	pr_info("dma rx:%d tx:%d, err irq:%d, regs:%p\n",
			sport->dma_rx_chan, sport->dma_tx_chan,
			sport->err_irq, sport->regs);

	sport->wdsize = wdsize;
	sport->dummy_count = dummy_count;

	if (L1_DATA_A_LENGTH)
		sport->dummy_buf = l1_data_sram_zalloc(dummy_count * 2);
	else
		sport->dummy_buf = kzalloc(dummy_count * 2, GFP_KERNEL);
	if (sport->dummy_buf == NULL) {
		pr_err("Failed to allocate dummy buffer\n");
		goto __error;
	}

	ret = sport_config_rx_dummy(sport);
	if (ret) {
		pr_err("Failed to config rx dummy ring\n");
		goto __error;
	}
	ret = sport_config_tx_dummy(sport);
	if (ret) {
		pr_err("Failed to config tx dummy ring\n");
		goto __error;
	}

	return sport;
__error:
	free_irq(sport->err_irq, sport);
__init_err3:
	free_dma(sport->dma_tx_chan);
__init_err2:
	free_dma(sport->dma_rx_chan);
__init_err1:
	kfree(sport);
	return NULL;
}
EXPORT_SYMBOL(sport_init);
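
/*
 * Typical client usage, roughly (a sketch with placeholder SPORT0 channel,
 * IRQ, and register macros, not taken from any specific board driver):
 *
 *	struct sport_param param = {
 *		.dma_rx_chan = CH_SPORT0_RX,
 *		.dma_tx_chan = CH_SPORT0_TX,
 *		.err_irq     = IRQ_SPORT0_ERROR,
 *		.regs        = (struct sport_register *)SPORT0_TCR1,
 *	};
 *	struct sport_device *sport;
 *
 *	sport = sport_init(&param, 4, 16, NULL);
 *	sport_config_rx_dma(sport, rx_buf, frags, fragsize);
 *	sport_set_rx_callback(sport, frag_done, substream);
 *	sport_rx_start(sport);
 *	...
 *	sport_rx_stop(sport);
 *	sport_done(sport);
 */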
void sport_done(struct sport_device *sport)
{
	if (sport == NULL)
		return;

	sport_stop(sport);
	if (sport->dma_rx_desc)
		dma_free_coherent(NULL, sport->rx_desc_bytes,
			sport->dma_rx_desc, 0);
	if (sport->dma_tx_desc)
		dma_free_coherent(NULL, sport->tx_desc_bytes,
			sport->dma_tx_desc, 0);

#if L1_DATA_A_LENGTH != 0
	l1_data_sram_free(sport->dummy_rx_desc);
	l1_data_sram_free(sport->dummy_tx_desc);
	l1_data_sram_free(sport->dummy_buf);
#else
	dma_free_coherent(NULL, 2 * sizeof(struct dmasg),
		sport->dummy_rx_desc, 0);
	dma_free_coherent(NULL, 2 * sizeof(struct dmasg),
		sport->dummy_tx_desc, 0);
	kfree(sport->dummy_buf);
#endif
	free_dma(sport->dma_rx_chan);
	free_dma(sport->dma_tx_chan);
	free_irq(sport->err_irq, sport);

	kfree(sport);
}
EXPORT_SYMBOL(sport_done);
/*
 * Only used to send and receive a few bytes by polling, before DMA-driven
 * streaming is running: the SPORT controller must be configured but not
 * yet enabled.  Multichannel mode cannot work with this PIO-style mode.
 */
/* Used by ac97 to write and read codec registers */
int sport_send_and_recv(struct sport_device *sport, u8 *out_data,
		u8 *in_data, int len)
{
	unsigned short dma_config;
	unsigned short status;
	unsigned long flags;
	unsigned long wait = 0;

	pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n",
			__func__, out_data, in_data, len);
	pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n"
			"mcmc1:0x%04x, mcmc2:0x%04x\n",
			sport->regs->tcr1, sport->regs->tcr2,
			sport->regs->tclkdiv, sport->regs->tfsdiv,
			sport->regs->mcmc1, sport->regs->mcmc2);
	flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len));

	/* Enable tx dma */
	dma_config = (RESTART | WDSIZE_16 | DI_EN);
	set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data);
	set_dma_x_count(sport->dma_tx_chan, len / 2);
	set_dma_x_modify(sport->dma_tx_chan, 2);
	set_dma_config(sport->dma_tx_chan, dma_config);
	enable_dma(sport->dma_tx_chan);

	if (in_data != NULL) {
		invalidate_dcache_range((unsigned)in_data,
				(unsigned)(in_data + len));
		/* Enable rx dma */
		dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN);
		set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data);
		set_dma_x_count(sport->dma_rx_chan, len / 2);
		set_dma_x_modify(sport->dma_rx_chan, 2);
		set_dma_config(sport->dma_rx_chan, dma_config);
		enable_dma(sport->dma_rx_chan);
	}

	local_irq_save(flags);
	sport->regs->tcr1 |= TSPEN;
	sport->regs->rcr1 |= RSPEN;
	SSYNC();

	status = get_dma_curr_irqstat(sport->dma_tx_chan);
	while (status & DMA_RUN) {
		udelay(1);
		status = get_dma_curr_irqstat(sport->dma_tx_chan);
		pr_debug("DMA status:0x%04x\n", status);
		if (wait++ > 100)
			goto __over;
	}
	status = sport->regs->stat;
	wait = 0;

	while (!(status & TXHRE)) {
		pr_debug("sport status:0x%04x\n", status);
		udelay(1);
		status = *(unsigned short *)&sport->regs->stat;
		if (wait++ > 1000)
			goto __over;
	}
	/* Wait for the last byte to be sent out */
	udelay(20);
	pr_debug("sport status:0x%04x\n", status);

__over:
	sport->regs->tcr1 &= ~TSPEN;
	sport->regs->rcr1 &= ~RSPEN;
	SSYNC();
	disable_dma(sport->dma_tx_chan);
	/* Clear the status */
	clear_dma_irqstat(sport->dma_tx_chan);
	if (in_data != NULL) {
		disable_dma(sport->dma_rx_chan);
		clear_dma_irqstat(sport->dma_rx_chan);
	}
	SSYNC();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(sport_send_and_recv);
MODULE_AUTHOR("Roy Huang");
MODULE_DESCRIPTION("SPORT driver for ADI Blackfin");
MODULE_LICENSE("GPL");