/*
 * File:         bf5xx-sport.c
 * Based on:
 * Author:       Roy Huang <roy.huang@analog.com>
 *
 * Created:      Tue Sep 21 10:52:42 CEST 2004
 * Description:  Blackfin SPORT driver
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/bug.h>
#include <asm/portmux.h>
#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>

#include "bf5xx-sport.h"

/* delay between frame sync pulse and first data bit in multichannel mode */
#define FRAME_DELAY (1 << 12)

struct sport_device *sport_handle;
EXPORT_SYMBOL(sport_handle);
/*
 * Note: multichannel support is configured in units of 8 channels;
 * tdm_count is the number of channels, NOT the channel count / 8.
 */
int sport_set_multichannel(struct sport_device *sport,
                int tdm_count, u32 mask, int packed)
{
        pr_debug("%s tdm_count=%d mask:0x%08x packed=%d\n", __func__,
                        tdm_count, mask, packed);

        if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
                return -EBUSY;

        if (tdm_count & 0x7)
                return -EINVAL;

        if (tdm_count > 32)
                return -EINVAL; /* Only up to 32 channels are supported */

        if (tdm_count) {
                sport->regs->mcmc1 = ((tdm_count >> 3) - 1) << 12;
                sport->regs->mcmc2 = FRAME_DELAY | MCMEN |
                                (packed ? (MCDTXPE | MCDRXPE) : 0);
                sport->regs->mtcs0 = mask;
                sport->regs->mrcs0 = mask;
        } else {
                sport->regs->mcmc1 = 0;
                sport->regs->mcmc2 = 0;
                sport->regs->mtcs0 = 0;
                sport->regs->mrcs0 = 0;
        }

        /* Channel selects 32..127 are never used, keep them cleared */
        sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0;
        sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0;

        SSYNC();

        return 0;
}
EXPORT_SYMBOL(sport_set_multichannel);
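
/*
 * Example (sketch only, not part of the driver): selecting a 16-slot
 * TDM frame with all 16 slots active and packed DMA data. This must
 * happen while the SPORT is disabled, or -EBUSY is returned; "sport"
 * is assumed to come from a successful sport_init():
 *
 *      int ret = sport_set_multichannel(sport, 16, 0xffff, 1);
 *      if (ret)
 *              pr_err("TDM setup failed: %d\n", ret);
 */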
int sport_config_rx(struct sport_device *sport, unsigned int rcr1,
                unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv)
{
        if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
                return -EBUSY;

        sport->regs->rcr1 = rcr1;
        sport->regs->rcr2 = rcr2;
        sport->regs->rclkdiv = clkdiv;
        sport->regs->rfsdiv = fsdiv;

        SSYNC();

        return 0;
}
EXPORT_SYMBOL(sport_config_rx);

int sport_config_tx(struct sport_device *sport, unsigned int tcr1,
                unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv)
{
        if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
                return -EBUSY;

        sport->regs->tcr1 = tcr1;
        sport->regs->tcr2 = tcr2;
        sport->regs->tclkdiv = clkdiv;
        sport->regs->tfsdiv = fsdiv;

        SSYNC();

        return 0;
}
EXPORT_SYMBOL(sport_config_tx);
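
/*
 * Example (sketch): assuming the usual Blackfin SPORT clocking relation
 * for internally generated clocks, TSCLK = SCLK / (2 * (TCLKDIV + 1))
 * with one frame sync every TFSDIV + 1 serial clock cycles, a 64-bit
 * frame at a target bit clock could be set up roughly as:
 *
 *      unsigned int clkdiv = sclk_hz / (2 * bitclk_hz) - 1;
 *      ret = sport_config_tx(sport, ITFS | ITCLK, 0, clkdiv, 64 - 1);
 *
 * sclk_hz and bitclk_hz are hypothetical variables and the tcr1 bits
 * are illustrative; real callers build tcr1/tcr2 for the mode they need.
 */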
static void setup_desc(struct dmasg *desc, void *buf, int fragcount,
                size_t fragsize, unsigned int cfg,
                unsigned int x_count, unsigned int y_count, size_t wdsize)
{
        int i;

        for (i = 0; i < fragcount; ++i) {
                desc[i].next_desc_addr = (unsigned long)&(desc[i + 1]);
                desc[i].start_addr = (unsigned long)buf + i * fragsize;
                desc[i].cfg = cfg;
                desc[i].x_count = x_count;
                desc[i].x_modify = wdsize;
                desc[i].y_count = y_count;
                desc[i].y_modify = wdsize;
        }

        /* make circular */
        desc[fragcount - 1].next_desc_addr = (unsigned long)desc;

        pr_debug("setup desc: desc0=%p, next0=%lx, desc1=%p, "
                        "next1=%lx\nx_count=%x, y_count=%x, addr=0x%lx, cfg=0x%x\n",
                        &(desc[0]), desc[0].next_desc_addr,
                        &(desc[1]), desc[1].next_desc_addr,
                        desc[0].x_count, desc[0].y_count,
                        desc[0].start_addr, desc[0].cfg);
}
static int sport_start(struct sport_device *sport)
{
        enable_dma(sport->dma_rx_chan);
        enable_dma(sport->dma_tx_chan);
        sport->regs->rcr1 |= RSPEN;
        sport->regs->tcr1 |= TSPEN;
        SSYNC();

        return 0;
}

static int sport_stop(struct sport_device *sport)
{
        sport->regs->tcr1 &= ~TSPEN;
        sport->regs->rcr1 &= ~RSPEN;
        SSYNC();

        disable_dma(sport->dma_rx_chan);
        disable_dma(sport->dma_tx_chan);
        return 0;
}
static inline int sport_hook_rx_dummy(struct sport_device *sport)
{
        struct dmasg *desc, temp_desc;
        unsigned long flags;

        BUG_ON(sport->dummy_rx_desc == NULL);
        BUG_ON(sport->curr_rx_desc == sport->dummy_rx_desc);

        /* The dummy ring may have been left self-looping; restore the chain */
        sport->dummy_rx_desc->next_desc_addr =
                        (unsigned long)(sport->dummy_rx_desc + 1);

        local_irq_save(flags);
        desc = (struct dmasg *)get_dma_next_desc_ptr(sport->dma_rx_chan);
        /* Back up the descriptor that is about to be modified */
        temp_desc = *desc;
        desc->x_count = 0xa;
        desc->y_count = 0;
        desc->next_desc_addr = (unsigned long)(sport->dummy_rx_desc);
        local_irq_restore(flags);
        /* Wait until the dummy buffer descriptor has been hooked */
        while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
                        sizeof(struct dmasg)) !=
                        (unsigned long)sport->dummy_rx_desc)
                ;
        sport->curr_rx_desc = sport->dummy_rx_desc;
        /* Restore the modified descriptor */
        *desc = temp_desc;

        return 0;
}
static inline int sport_rx_dma_start(struct sport_device *sport, int dummy)
{
        if (dummy) {
                sport->dummy_rx_desc->next_desc_addr =
                                (unsigned long)sport->dummy_rx_desc;
                sport->curr_rx_desc = sport->dummy_rx_desc;
        } else
                sport->curr_rx_desc = sport->dma_rx_desc;

        set_dma_next_desc_addr(sport->dma_rx_chan,
                        (unsigned long)(sport->curr_rx_desc));
        set_dma_x_count(sport->dma_rx_chan, 0);
        set_dma_x_modify(sport->dma_rx_chan, 0);
        set_dma_config(sport->dma_rx_chan,
                        (DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32 | WNR));
        set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr);
        SSYNC();

        return 0;
}

static inline int sport_tx_dma_start(struct sport_device *sport, int dummy)
{
        if (dummy) {
                sport->dummy_tx_desc->next_desc_addr =
                                (unsigned long)sport->dummy_tx_desc;
                sport->curr_tx_desc = sport->dummy_tx_desc;
        } else
                sport->curr_tx_desc = sport->dma_tx_desc;

        set_dma_next_desc_addr(sport->dma_tx_chan,
                        (unsigned long)(sport->curr_tx_desc));
        set_dma_x_count(sport->dma_tx_chan, 0);
        set_dma_x_modify(sport->dma_tx_chan, 0);
        set_dma_config(sport->dma_tx_chan,
                        (DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32));
        set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr);
        SSYNC();

        return 0;
}
int sport_rx_start(struct sport_device *sport)
{
        unsigned long flags;

        pr_debug("%s enter\n", __func__);
        if (sport->rx_run)
                return -EBUSY;
        if (sport->tx_run) {
                /* tx is running, rx is not; swap the real rx ring in */
                BUG_ON(sport->dma_rx_desc == NULL);
                BUG_ON(sport->curr_rx_desc != sport->dummy_rx_desc);
                local_irq_save(flags);
                while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
                                sizeof(struct dmasg)) !=
                                (unsigned long)sport->dummy_rx_desc)
                        ;
                sport->dummy_rx_desc->next_desc_addr =
                                (unsigned long)(sport->dma_rx_desc);
                local_irq_restore(flags);
                sport->curr_rx_desc = sport->dma_rx_desc;
        } else {
                sport_tx_dma_start(sport, 1);
                sport_rx_dma_start(sport, 0);
                sport_start(sport);
        }

        sport->rx_run = 1;

        return 0;
}
EXPORT_SYMBOL(sport_rx_start);
int sport_rx_stop(struct sport_device *sport)
{
        pr_debug("%s enter\n", __func__);

        if (!sport->rx_run)
                return 0;
        if (sport->tx_run) {
                /* TX dma is still running, hook the dummy buffer */
                sport_hook_rx_dummy(sport);
        } else {
                /* Both rx and tx dma will be stopped */
                sport_stop(sport);
                sport->curr_rx_desc = NULL;
                sport->curr_tx_desc = NULL;
        }

        sport->rx_run = 0;

        return 0;
}
EXPORT_SYMBOL(sport_rx_stop);
static inline int sport_hook_tx_dummy(struct sport_device *sport)
{
        struct dmasg *desc, temp_desc;
        unsigned long flags;

        BUG_ON(sport->dummy_tx_desc == NULL);
        BUG_ON(sport->curr_tx_desc == sport->dummy_tx_desc);

        /* The dummy ring may have been left self-looping; restore the chain */
        sport->dummy_tx_desc->next_desc_addr =
                        (unsigned long)(sport->dummy_tx_desc + 1);

        /* Shorten the time spent on the last normal descriptor */
        local_irq_save(flags);
        desc = (struct dmasg *)get_dma_next_desc_ptr(sport->dma_tx_chan);
        /* Back up the descriptor that is about to be modified */
        temp_desc = *desc;
        desc->x_count = 0xa;
        desc->y_count = 0;
        desc->next_desc_addr = (unsigned long)(sport->dummy_tx_desc);
        local_irq_restore(flags);
        /* Wait until the dummy buffer descriptor has been hooked */
        while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
                        sizeof(struct dmasg)) !=
                        (unsigned long)sport->dummy_tx_desc)
                ;
        sport->curr_tx_desc = sport->dummy_tx_desc;
        /* Restore the modified descriptor */
        *desc = temp_desc;

        return 0;
}
int sport_tx_start(struct sport_device *sport)
{
        unsigned long flags;    /* must be unsigned long for local_irq_save() */

        pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__,
                        sport->tx_run, sport->rx_run);
        if (sport->tx_run)
                return -EBUSY;
        if (sport->rx_run) {
                BUG_ON(sport->dma_tx_desc == NULL);
                BUG_ON(sport->curr_tx_desc != sport->dummy_tx_desc);
                /* Hook the normal buffer descriptor */
                local_irq_save(flags);
                while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
                                sizeof(struct dmasg)) !=
                                (unsigned long)sport->dummy_tx_desc)
                        ;
                sport->dummy_tx_desc->next_desc_addr =
                                (unsigned long)(sport->dma_tx_desc);
                local_irq_restore(flags);
                sport->curr_tx_desc = sport->dma_tx_desc;
        } else {
                sport_tx_dma_start(sport, 0);
                /* Let rx dma run the dummy buffer */
                sport_rx_dma_start(sport, 1);
                sport_start(sport);
        }

        sport->tx_run = 1;
        return 0;
}
EXPORT_SYMBOL(sport_tx_start);
int sport_tx_stop(struct sport_device *sport)
{
        if (!sport->tx_run)
                return 0;
        if (sport->rx_run) {
                /* RX is still running, hook the dummy buffer */
                sport_hook_tx_dummy(sport);
        } else {
                /* Both rx and tx dma stopped */
                sport_stop(sport);
                sport->curr_rx_desc = NULL;
                sport->curr_tx_desc = NULL;
        }

        sport->tx_run = 0;

        return 0;
}
EXPORT_SYMBOL(sport_tx_stop);
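
/*
 * Example (sketch): full-duplex sequencing. Whichever direction starts
 * first puts the other side on its dummy ring, so the two directions
 * can be started and stopped independently:
 *
 *      sport_tx_start(sport);    playback runs, rx loops on dummy ring
 *      sport_rx_start(sport);    capture hooks in its real ring
 *      sport_rx_stop(sport);     tx keeps running undisturbed
 *      sport_tx_stop(sport);     both DMA channels now stopped
 */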
static inline int compute_wdsize(size_t wdsize)
{
        switch (wdsize) {
        case 1:
                return WDSIZE_8;
        case 2:
                return WDSIZE_16;
        case 4:
        default:
                return WDSIZE_32;
        }
}
int sport_config_rx_dma(struct sport_device *sport, void *buf,
                int fragcount, size_t fragsize)
{
        unsigned int x_count;
        unsigned int y_count;
        unsigned int cfg;
        dma_addr_t addr;

        pr_debug("%s buf:%p, frag:%d, fragsize:0x%zx\n", __func__,
                        buf, fragcount, fragsize);

        x_count = fragsize / sport->wdsize;
        y_count = 0;

        /* For fragments larger than 64K words we use 2D DMA: split the
         * word count into two factors that fit the 16-bit count registers
         * (e.g. 0x18000 words becomes x_count = 0x8000, y_count = 3). */
        if (x_count >= 0x10000) {
                int i, count = x_count;

                for (i = 16; i > 0; i--) {
                        x_count = 1 << i;
                        if ((count & (x_count - 1)) == 0) {
                                y_count = count >> i;
                                if (y_count < 0x10000)
                                        break;
                        }
                }
                if (i == 0)
                        return -EINVAL;
        }
        pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__,
                        x_count, y_count);

        if (sport->dma_rx_desc)
                dma_free_coherent(NULL, sport->rx_desc_bytes,
                                sport->dma_rx_desc, 0);

        /* Allocate a new descriptor ring as the current one. */
        sport->dma_rx_desc = dma_alloc_coherent(NULL,
                        fragcount * sizeof(struct dmasg), &addr, 0);
        sport->rx_desc_bytes = fragcount * sizeof(struct dmasg);

        if (!sport->dma_rx_desc) {
                pr_err("Failed to allocate memory for rx desc\n");
                return -ENOMEM;
        }

        sport->rx_buf = buf;
        sport->rx_fragsize = fragsize;
        sport->rx_frags = fragcount;

        cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR |
                        (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

        if (y_count != 0)
                cfg |= DMA2D;

        setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize,
                        cfg | DMAEN, x_count, y_count, sport->wdsize);

        return 0;
}
EXPORT_SYMBOL(sport_config_rx_dma);
int sport_config_tx_dma(struct sport_device *sport, void *buf,
                int fragcount, size_t fragsize)
{
        unsigned int x_count;
        unsigned int y_count;
        unsigned int cfg;
        dma_addr_t addr;

        pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%zx\n",
                        __func__, buf, fragcount, fragsize);

        x_count = fragsize / sport->wdsize;
        y_count = 0;

        /* For fragments larger than 64K words we use 2D DMA: split the
         * word count into two factors that fit the 16-bit count
         * registers, as in sport_config_rx_dma() above. */
        if (x_count >= 0x10000) {
                int i, count = x_count;

                for (i = 16; i > 0; i--) {
                        x_count = 1 << i;
                        if ((count & (x_count - 1)) == 0) {
                                y_count = count >> i;
                                if (y_count < 0x10000)
                                        break;
                        }
                }
                if (i == 0)
                        return -EINVAL;
        }
        pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__,
                        x_count, y_count);

        if (sport->dma_tx_desc) {
                dma_free_coherent(NULL, sport->tx_desc_bytes,
                                sport->dma_tx_desc, 0);
        }

        sport->dma_tx_desc = dma_alloc_coherent(NULL,
                        fragcount * sizeof(struct dmasg), &addr, 0);
        sport->tx_desc_bytes = fragcount * sizeof(struct dmasg);

        if (!sport->dma_tx_desc) {
                pr_err("Failed to allocate memory for tx desc\n");
                return -ENOMEM;
        }

        sport->tx_buf = buf;
        sport->tx_fragsize = fragsize;
        sport->tx_frags = fragcount;

        cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) |
                        (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

        if (y_count != 0)
                cfg |= DMA2D;

        setup_desc(sport->dma_tx_desc, buf, fragcount, fragsize,
                        cfg | DMAEN, x_count, y_count, sport->wdsize);

        return 0;
}
EXPORT_SYMBOL(sport_config_tx_dma);
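
/*
 * Example (sketch): a capture stream whose 32 KiB DMA area is split
 * into 8 fragments of 4 KiB, with an interrupt raised at each fragment
 * boundary. "buf" is assumed to be a DMA-safe buffer supplied by the
 * PCM layer:
 *
 *      ret = sport_config_rx_dma(sport, buf, 8, 0x1000);
 *      if (!ret)
 *              ret = sport_rx_start(sport);
 */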
/* setup a dummy dma descriptor ring which cycles endlessly through the
 * dummy buffer and never raises an interrupt (no DI_EN in the config) */
static int sport_config_rx_dummy(struct sport_device *sport)
{
        struct dmasg *desc;
        unsigned config;

        pr_debug("%s entered\n", __func__);

#if L1_DATA_A_LENGTH != 0
        desc = (struct dmasg *) l1_data_sram_alloc(2 * sizeof(*desc));
#else
        {
                dma_addr_t addr;
                desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
        }
#endif
        if (desc == NULL) {
                pr_err("Failed to allocate memory for dummy rx desc\n");
                return -ENOMEM;
        }
        memset(desc, 0, 2 * sizeof(*desc));
        sport->dummy_rx_desc = desc;
        desc->start_addr = (unsigned long)sport->dummy_buf;
        config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize)
                        | WNR | DMAEN;
        desc->cfg = config;
        desc->x_count = sport->dummy_count / sport->wdsize;
        desc->x_modify = sport->wdsize;
        desc->y_count = 0;
        desc->y_modify = 0;
        /* Chain the two descriptors into a two-entry loop */
        memcpy(desc + 1, desc, sizeof(*desc));
        desc->next_desc_addr = (unsigned long)(desc + 1);
        desc[1].next_desc_addr = (unsigned long)desc;

        return 0;
}
static int sport_config_tx_dummy(struct sport_device *sport)
{
        struct dmasg *desc;
        unsigned int config;

        pr_debug("%s entered\n", __func__);

#if L1_DATA_A_LENGTH != 0
        desc = (struct dmasg *) l1_data_sram_alloc(2 * sizeof(*desc));
#else
        {
                dma_addr_t addr;
                desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
        }
#endif
        if (!desc) {
                pr_err("Failed to allocate memory for dummy tx desc\n");
                return -ENOMEM;
        }
        memset(desc, 0, 2 * sizeof(*desc));
        sport->dummy_tx_desc = desc;
        /* tx uses the second half of the shared dummy buffer */
        desc->start_addr = (unsigned long)sport->dummy_buf +
                        sport->dummy_count;
        config = DMAFLOW_LARGE | NDSIZE_9 |
                        compute_wdsize(sport->wdsize) | DMAEN;
        desc->cfg = config;
        desc->x_count = sport->dummy_count / sport->wdsize;
        desc->x_modify = sport->wdsize;
        desc->y_count = 0;
        desc->y_modify = 0;
        /* Chain the two descriptors into a two-entry loop */
        memcpy(desc + 1, desc, sizeof(*desc));
        desc->next_desc_addr = (unsigned long)(desc + 1);
        desc[1].next_desc_addr = (unsigned long)desc;

        return 0;
}
unsigned long sport_curr_offset_rx(struct sport_device *sport)
{
        unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan);

        return (unsigned char *)curr - sport->rx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_rx);

unsigned long sport_curr_offset_tx(struct sport_device *sport)
{
        unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan);

        return (unsigned char *)curr - sport->tx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_tx);

void sport_incfrag(struct sport_device *sport, int *frag, int tx)
{
        ++(*frag);
        if (tx == 1 && *frag == sport->tx_frags)
                *frag = 0;

        if (tx == 0 && *frag == sport->rx_frags)
                *frag = 0;
}
EXPORT_SYMBOL(sport_incfrag);

void sport_decfrag(struct sport_device *sport, int *frag, int tx)
{
        --(*frag);
        if (tx == 1 && *frag == 0)
                *frag = sport->tx_frags;

        if (tx == 0 && *frag == 0)
                *frag = sport->rx_frags;
}
EXPORT_SYMBOL(sport_decfrag);
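
/*
 * Example (sketch): a PCM pointer() implementation can turn the byte
 * offset into the fragment index currently being played:
 *
 *      unsigned long off = sport_curr_offset_tx(sport);
 *      int frag = off / sport->tx_fragsize;
 */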
static int sport_check_status(struct sport_device *sport,
                unsigned int *sport_stat,
                unsigned int *rx_stat,
                unsigned int *tx_stat)
{
        int status = 0;

        if (sport_stat) {
                SSYNC();
                status = sport->regs->stat;
                if (status & (TOVF | TUVF | ROVF | RUVF))
                        sport->regs->stat = (status & (TOVF | TUVF | ROVF | RUVF));
                SSYNC();
                *sport_stat = status;
        }

        if (rx_stat) {
                SSYNC();
                status = get_dma_curr_irqstat(sport->dma_rx_chan);
                if (status & (DMA_DONE | DMA_ERR))
                        clear_dma_irqstat(sport->dma_rx_chan);
                SSYNC();
                *rx_stat = status;
        }

        if (tx_stat) {
                SSYNC();
                status = get_dma_curr_irqstat(sport->dma_tx_chan);
                if (status & (DMA_DONE | DMA_ERR))
                        clear_dma_irqstat(sport->dma_tx_chan);
                SSYNC();
                *tx_stat = status;
        }

        return 0;
}
int sport_dump_stat(struct sport_device *sport, char *buf, size_t len)
{
        int ret;

        ret = snprintf(buf, len,
                        "sts: 0x%04x\n"
                        "rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n",
                        sport->regs->stat,
                        sport->dma_rx_chan,
                        get_dma_curr_irqstat(sport->dma_rx_chan),
                        sport->dma_tx_chan,
                        get_dma_curr_irqstat(sport->dma_tx_chan));
        buf += ret;
        len -= ret;

        ret += snprintf(buf, len,
                        "curr_rx_desc:0x%p, curr_tx_desc:0x%p\n"
                        "dma_rx_desc:0x%p, dma_tx_desc:0x%p\n"
                        "dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n",
                        sport->curr_rx_desc, sport->curr_tx_desc,
                        sport->dma_rx_desc, sport->dma_tx_desc,
                        sport->dummy_rx_desc, sport->dummy_tx_desc);

        return ret;
}
static irqreturn_t rx_handler(int irq, void *dev_id)
{
        unsigned int rx_stat;
        struct sport_device *sport = dev_id;

        pr_debug("%s enter\n", __func__);
        sport_check_status(sport, NULL, &rx_stat, NULL);
        if (!(rx_stat & DMA_DONE))
                pr_err("rx dma is already stopped\n");

        if (sport->rx_callback) {
                sport->rx_callback(sport->rx_data);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t tx_handler(int irq, void *dev_id)
{
        unsigned int tx_stat;
        struct sport_device *sport = dev_id;

        pr_debug("%s enter\n", __func__);
        sport_check_status(sport, NULL, NULL, &tx_stat);
        if (!(tx_stat & DMA_DONE)) {
                pr_err("tx dma is already stopped\n");
                return IRQ_HANDLED;
        }
        if (sport->tx_callback) {
                sport->tx_callback(sport->tx_data);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}
static irqreturn_t err_handler(int irq, void *dev_id)
{
        unsigned int status = 0;
        struct sport_device *sport = dev_id;

        pr_debug("%s\n", __func__);
        if (sport_check_status(sport, &status, NULL, NULL)) {
                pr_err("error checking status\n");
                return IRQ_NONE;
        }

        if (status & (TOVF | TUVF | ROVF | RUVF)) {
                pr_info("sport status error:%s%s%s%s\n",
                                status & TOVF ? " TOVF" : "",
                                status & TUVF ? " TUVF" : "",
                                status & ROVF ? " ROVF" : "",
                                status & RUVF ? " RUVF" : "");
                /* Restart the faulting direction on its proper ring */
                if (status & TOVF || status & TUVF) {
                        disable_dma(sport->dma_tx_chan);
                        if (sport->tx_run)
                                sport_tx_dma_start(sport, 0);
                        else
                                sport_tx_dma_start(sport, 1);
                        enable_dma(sport->dma_tx_chan);
                } else {
                        disable_dma(sport->dma_rx_chan);
                        if (sport->rx_run)
                                sport_rx_dma_start(sport, 0);
                        else
                                sport_rx_dma_start(sport, 1);
                        enable_dma(sport->dma_rx_chan);
                }
        }
        /* Clear the sticky error bits */
        status = sport->regs->stat;
        if (status & (TOVF | TUVF | ROVF | RUVF))
                sport->regs->stat = (status & (TOVF | TUVF | ROVF | RUVF));
        SSYNC();

        if (sport->err_callback)
                sport->err_callback(sport->err_data);

        return IRQ_HANDLED;
}
int sport_set_rx_callback(struct sport_device *sport,
                void (*rx_callback)(void *), void *rx_data)
{
        BUG_ON(rx_callback == NULL);
        sport->rx_callback = rx_callback;
        sport->rx_data = rx_data;

        return 0;
}
EXPORT_SYMBOL(sport_set_rx_callback);

int sport_set_tx_callback(struct sport_device *sport,
                void (*tx_callback)(void *), void *tx_data)
{
        BUG_ON(tx_callback == NULL);
        sport->tx_callback = tx_callback;
        sport->tx_data = tx_data;

        return 0;
}
EXPORT_SYMBOL(sport_set_tx_callback);

int sport_set_err_callback(struct sport_device *sport,
                void (*err_callback)(void *), void *err_data)
{
        BUG_ON(err_callback == NULL);
        sport->err_callback = err_callback;
        sport->err_data = err_data;

        return 0;
}
EXPORT_SYMBOL(sport_set_err_callback);
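
/*
 * Example (sketch): a PCM driver would typically forward the per-
 * fragment interrupt to ALSA. "substream" is hypothetical per-stream
 * data passed back through the void pointer:
 *
 *      static void example_tx_cb(void *data)
 *      {
 *              snd_pcm_period_elapsed(data);
 *      }
 *
 *      sport_set_tx_callback(sport, example_tx_cb, substream);
 */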
struct sport_device *sport_init(struct sport_param *param, unsigned wdsize,
                unsigned dummy_count, void *private_data)
{
        int ret;
        struct sport_device *sport;

        pr_debug("%s enter\n", __func__);
        BUG_ON(param == NULL);
        BUG_ON(wdsize == 0 || dummy_count == 0);
        sport = kzalloc(sizeof(struct sport_device), GFP_KERNEL);
        if (!sport) {
                pr_err("Failed to allocate for sport device\n");
                return NULL;
        }

        sport->dma_rx_chan = param->dma_rx_chan;
        sport->dma_tx_chan = param->dma_tx_chan;
        sport->err_irq = param->err_irq;
        sport->regs = param->regs;
        sport->private_data = private_data;

        if (request_dma(sport->dma_rx_chan, "SPORT RX Data") < 0) {
                pr_err("Failed to request RX dma %d\n", sport->dma_rx_chan);
                goto __init_err1;
        }
        if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) {
                pr_err("Failed to request RX irq %d\n", sport->dma_rx_chan);
                goto __init_err2;
        }

        if (request_dma(sport->dma_tx_chan, "SPORT TX Data") < 0) {
                pr_err("Failed to request TX dma %d\n", sport->dma_tx_chan);
                goto __init_err2;
        }

        if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) {
                pr_err("Failed to request TX irq %d\n", sport->dma_tx_chan);
                goto __init_err3;
        }

        if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err",
                        sport) < 0) {
                pr_err("Failed to request err irq:%d\n", sport->err_irq);
                goto __init_err3;
        }

        pr_debug("dma rx:%d tx:%d, err irq:%d, regs:%p\n",
                        sport->dma_rx_chan, sport->dma_tx_chan,
                        sport->err_irq, sport->regs);

        sport->wdsize = wdsize;
        sport->dummy_count = dummy_count;

#if L1_DATA_A_LENGTH != 0
        sport->dummy_buf = l1_data_sram_alloc(dummy_count * 2);
#else
        sport->dummy_buf = kmalloc(dummy_count * 2, GFP_KERNEL);
#endif
        if (sport->dummy_buf == NULL) {
                pr_err("Failed to allocate dummy buffer\n");
                goto __error;
        }

        memset(sport->dummy_buf, 0, dummy_count * 2);
        ret = sport_config_rx_dummy(sport);
        if (ret) {
                pr_err("Failed to config rx dummy ring\n");
                goto __error;
        }
        ret = sport_config_tx_dummy(sport);
        if (ret) {
                pr_err("Failed to config tx dummy ring\n");
                goto __error;
        }

        return sport;
__error:
        free_irq(sport->err_irq, sport);
__init_err3:
        free_dma(sport->dma_tx_chan);
__init_err2:
        free_dma(sport->dma_rx_chan);
__init_err1:
        kfree(sport);
        return NULL;
}
EXPORT_SYMBOL(sport_init);
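
/*
 * Example (sketch): bringing up SPORT0 with 32-bit words and a 64-byte
 * dummy area per direction. The channel, IRQ, and register names are
 * illustrative BF537-style values; real boards pass them in from
 * platform code via struct sport_param:
 *
 *      static struct sport_param sport0_param = {
 *              .dma_rx_chan = CH_SPORT0_RX,
 *              .dma_tx_chan = CH_SPORT0_TX,
 *              .err_irq = IRQ_SPORT0_ERROR,
 *              .regs = (struct sport_register *)SPORT0_TCR1,
 *      };
 *
 *      sport_handle = sport_init(&sport0_param, 4, 64, NULL);
 *      if (!sport_handle)
 *              return -ENODEV;
 */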
void sport_done(struct sport_device *sport)
{
        if (sport == NULL)
                return;

        sport_stop(sport);
        if (sport->dma_rx_desc)
                dma_free_coherent(NULL, sport->rx_desc_bytes,
                                sport->dma_rx_desc, 0);
        if (sport->dma_tx_desc)
                dma_free_coherent(NULL, sport->tx_desc_bytes,
                                sport->dma_tx_desc, 0);

#if L1_DATA_A_LENGTH != 0
        l1_data_sram_free(sport->dummy_rx_desc);
        l1_data_sram_free(sport->dummy_tx_desc);
        l1_data_sram_free(sport->dummy_buf);
#else
        dma_free_coherent(NULL, 2 * sizeof(struct dmasg),
                        sport->dummy_rx_desc, 0);
        dma_free_coherent(NULL, 2 * sizeof(struct dmasg),
                        sport->dummy_tx_desc, 0);
        kfree(sport->dummy_buf);
#endif
        free_dma(sport->dma_rx_chan);
        free_dma(sport->dma_tx_chan);
        free_irq(sport->err_irq, sport);

        kfree(sport);
}
EXPORT_SYMBOL(sport_done);
/*
 * PIO helper: exchanges a few bytes while the SPORT is configured but
 * DMA streaming has not been started. Multichannel mode does not work
 * with this PIO-style path. AC97 uses it to read and write codec
 * registers.
 */
int sport_send_and_recv(struct sport_device *sport, u8 *out_data,
                u8 *in_data, int len)
{
        unsigned short dma_config;
        unsigned short status;
        unsigned long flags;
        unsigned long wait = 0;

        pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n",
                        __func__, out_data, in_data, len);
        pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n"
                        "mcmc1:0x%04x, mcmc2:0x%04x\n",
                        sport->regs->tcr1, sport->regs->tcr2,
                        sport->regs->tclkdiv, sport->regs->tfsdiv,
                        sport->regs->mcmc1, sport->regs->mcmc2);
        flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len));

        /* Enable tx dma */
        dma_config = (RESTART | WDSIZE_16 | DI_EN);
        set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data);
        set_dma_x_count(sport->dma_tx_chan, len / 2);
        set_dma_x_modify(sport->dma_tx_chan, 2);
        set_dma_config(sport->dma_tx_chan, dma_config);
        enable_dma(sport->dma_tx_chan);

        if (in_data != NULL) {
                invalidate_dcache_range((unsigned)in_data,
                                (unsigned)(in_data + len));
                /* Enable rx dma */
                dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN);
                set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data);
                set_dma_x_count(sport->dma_rx_chan, len / 2);
                set_dma_x_modify(sport->dma_rx_chan, 2);
                set_dma_config(sport->dma_rx_chan, dma_config);
                enable_dma(sport->dma_rx_chan);
        }

        local_irq_save(flags);
        sport->regs->tcr1 |= TSPEN;
        sport->regs->rcr1 |= RSPEN;
        SSYNC();

        status = get_dma_curr_irqstat(sport->dma_tx_chan);
        while (status & DMA_RUN) {
                udelay(1);
                status = get_dma_curr_irqstat(sport->dma_tx_chan);
                pr_debug("DMA status:0x%04x\n", status);
                if (wait++ > 100)
                        goto __over;
        }
        status = sport->regs->stat;
        wait = 0;

        while (!(status & TXHRE)) {
                pr_debug("sport status:0x%04x\n", status);
                udelay(1);
                status = *(unsigned short *)&sport->regs->stat;
                if (wait++ > 1000)
                        goto __over;
        }
        /* Wait for the last byte to be shifted out */
        udelay(20);
        pr_debug("sport status:0x%04x\n", status);

__over:
        sport->regs->tcr1 &= ~TSPEN;
        sport->regs->rcr1 &= ~RSPEN;
        SSYNC();
        disable_dma(sport->dma_tx_chan);
        /* Clear the status */
        clear_dma_irqstat(sport->dma_tx_chan);
        if (in_data != NULL) {
                disable_dma(sport->dma_rx_chan);
                clear_dma_irqstat(sport->dma_rx_chan);
        }
        SSYNC();
        local_irq_restore(flags);

        return 0;
}
EXPORT_SYMBOL(sport_send_and_recv);
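
/*
 * Example (sketch): an AC97-style 16-bit command/response exchange.
 * The frame layout is codec specific; this only shows the calling
 * convention (buffers must be DMA-safe and len is in bytes, even,
 * since the transfer runs in 16-bit words):
 *
 *      u16 cmd[8] = { 0 }, resp[8];
 *      sport_send_and_recv(sport, (u8 *)cmd, (u8 *)resp, sizeof(cmd));
 */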
MODULE_AUTHOR("Roy Huang");
MODULE_DESCRIPTION("SPORT driver for ADI Blackfin");
MODULE_LICENSE("GPL");