lx_core.c

/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/* #define RMH_DEBUG 1 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */

static const unsigned long dsp_port_offsets[] = {
        0,
        0x400,
        0x401,
        0x402,
        0x403,
        0x404,
        0x405,
        0x406,
        0x407,
        0x408,
        0x409,
        0x40a,
        0x40b,
        0x40c,
        0x410,
        0x411,
        0x412,
        0x413,
        0x414,
        0x415,
        0x416,
        0x420,
        0x430,
        0x431,
        0x432,
        0x433,
        0x434,
        0x440
};
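
/* dsp_port_offsets[] holds indices of 32-bit DSP registers; lx_dsp_register()
 * below scales them by 4 to get byte offsets from the DSP BAR. */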
static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_dsp_bar;
        return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_dsp_register(chip, port);
        return ioread32(address);
}

static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
                               u32 len)
{
        void __iomem *address = lx_dsp_register(chip, port);
        memcpy_fromio(data, address, len*sizeof(u32));
}

void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
        void __iomem *address = lx_dsp_register(chip, port);
        iowrite32(data, address);
}

static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
                                const u32 *data, u32 len)
{
        void __iomem *address = lx_dsp_register(chip, port);
        memcpy_toio(address, data, len*sizeof(u32));
}

static const unsigned long plx_port_offsets[] = {
        0x04,
        0x40,
        0x44,
        0x48,
        0x4c,
        0x50,
        0x54,
        0x58,
        0x5c,
        0x64,
        0x68,
        0x6C
};
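
/* plx_port_offsets[] are byte offsets into the remapped PLX region and are
 * added to the base address directly by lx_plx_register(). */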
static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_plx_remapped;
        return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_plx_register(chip, port);
        return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
        void __iomem *address = lx_plx_register(chip, port);
        iowrite32(data, address);
}

u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr)
{
        int index;

        switch (mbox_nr) {
        case 1:
                index = ePLX_MBOX1;     break;
        case 2:
                index = ePLX_MBOX2;     break;
        case 3:
                index = ePLX_MBOX3;     break;
        case 4:
                index = ePLX_MBOX4;     break;
        case 5:
                index = ePLX_MBOX5;     break;
        case 6:
                index = ePLX_MBOX6;     break;
        case 7:
                index = ePLX_MBOX7;     break;
        case 0:                 /* reserved for HF flags */
                snd_BUG();
        default:
                return 0xdeadbeef;
        }

        return lx_plx_reg_read(chip, index);
}

int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value)
{
        int index = -1;

        switch (mbox_nr) {
        case 1:
                index = ePLX_MBOX1;     break;
        case 3:
                index = ePLX_MBOX3;     break;
        case 4:
                index = ePLX_MBOX4;     break;
        case 5:
                index = ePLX_MBOX5;     break;
        case 6:
                index = ePLX_MBOX6;     break;
        case 7:
                index = ePLX_MBOX7;     break;

        case 0:                 /* reserved for HF flags */
        case 2:                 /* reserved for Pipe States
                                 * the DSP keeps an image of it */
                snd_BUG();
                return -EBADRQC;
        }

        lx_plx_reg_write(chip, index, value);
        return 0;
}


/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR      0x00000002
#define Reg_CSM_MC      0x00000001

struct dsp_cmd_info {
        u32  dcCodeOp;          /* Op Code of the command (usually 1st 24-bits
                                 * word).*/
        u16  dcCmdLength;       /* Command length in words of 24 bits.*/
        u16  dcStatusType;      /* Status type: 0 for fixed length, 1 for
                                 * random. */
        u16  dcStatusLength;    /* Status length (if fixed).*/
        char *dcOpName;
};

/*
  Initialization and control data for the Microblaze interface
  - OpCode:
    the opcode field of the command set at the proper offset
  - CmdLength
    the number of command words
  - StatusType
    offset in the status registers: 0 means that the return value may be
    different from 0, and must be read
  - StatusLength
    the number of status words (in addition to the return value)
*/

static struct dsp_cmd_info dsp_commands[] =
{
        { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
          , 1 , 0 /**/                    , CMD_NAME("INFO_DEBUG") },
        { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/                    , CMD_NAME("GET_SYS_CFG") },
        { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                    , CMD_NAME("SET_GRANULARITY") },
        { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                    , CMD_NAME("SET_TIMER_IRQ") },
        { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /*up to 10*/            , CMD_NAME("GET_EVENT") },
        { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /*up to 4*/             , CMD_NAME("GET_PIPES") },
        { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/                    , CMD_NAME("ALLOCATE_PIPE") },
        { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/                    , CMD_NAME("RELEASE_PIPE") },
        { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
          , 1 , MAX_STREAM_BUFFER         , CMD_NAME("ASK_BUFFERS") },
        { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /*up to 2*/             , CMD_NAME("STOP_PIPE") },
        { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 2*/             , CMD_NAME("GET_PIPE_SPL_COUNT") },
        { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
          , 1 , 0 /**/                    , CMD_NAME("TOGGLE_PIPE_STATE") },
        { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
          , 1 , 0 /**/                    , CMD_NAME("DEF_STREAM") },
        { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
          , 1 , 0 /**/                    , CMD_NAME("SET_MUTE") },
        { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/                    , CMD_NAME("GET_STREAM_SPL_COUNT") },
        { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
          , 0 , 1 /**/                    , CMD_NAME("UPDATE_BUFFER") },
        { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 4 /**/                    , CMD_NAME("GET_BUFFER") },
        { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 4*/             , CMD_NAME("CANCEL_BUFFER") },
        { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /**/                    , CMD_NAME("GET_PEAK") },
        { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                    , CMD_NAME("SET_STREAM_STATE") },
};

static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
        snd_BUG_ON(cmd >= CMD_14_INVALID);

        rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
        rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
        rmh->stat_len = dsp_commands[cmd].dcStatusLength;
        rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
        rmh->cmd_idx = cmd;
        memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
        memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
        rmh->cmd_idx = cmd;
#endif
}

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
        u8 idx = rmh->cmd_idx;
        int i;

        snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

        for (i = 0; i != rmh->cmd_len; ++i)
                snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

        for (i = 0; i != rmh->stat_len; ++i)
                snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
        snd_printk("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif


/* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
#define XILINX_TIMEOUT_MS       40
#define XILINX_POLL_NO_SLEEP    100
#define XILINX_POLL_ITERATIONS  150
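
/* lx_message_send_atomic() implements the command handshake with the embedded
 * MicroBlaze: it refuses to start while eReg_CSM still has the MC/MR bits
 * set, copies the command words to eReg_CRM1, kicks the DSP by writing
 * Reg_CSM_MC, busy-waits (udelay) for Reg_CSM_MR, then reads the return value
 * and any status words from eReg_CRM1/eReg_CRM2 and finally clears eReg_CSM. */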
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        /* write command */
        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        /* MicroBlaze gogogo */
        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        /* wait for device to answer */
        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
                if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
                        if (rmh->dsp_stat == 0)
                                reg = lx_dsp_reg_read(chip, eReg_CRM1);
                        else
                                reg = 0;
                        goto polling_successful;
                } else
                        udelay(1);
        }
        snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! "
                   "polling failed\n");

polling_successful:
        if ((reg & ERROR_VALUE) == 0) {
                /* read response */
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                snd_printk(LXP "rmh error: %08x\n", reg);

        /* clear Reg_CSM_MR */
        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return reg;
}


/* low-level dsp access */
int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
        u16 ret;
        unsigned long flags;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        *rdsp_version = chip->rmh.stat[1];
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
        u16 ret = 0;
        unsigned long flags;
        u32 freq_raw = 0;
        u32 freq = 0;
        u32 frequency = 0;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (ret == 0) {
                freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
                freq = freq_raw & XES_FREQ_COUNT8_MASK;

                if ((freq < XES_FREQ_COUNT8_48_MAX) ||
                    (freq > XES_FREQ_COUNT8_44_MIN))
                        frequency = 0; /* unknown */
                else if (freq >= XES_FREQ_COUNT8_44_MAX)
                        frequency = 44100;
                else
                        frequency = 48000;
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);

        *rfreq = frequency * chip->freq_ratio;

        return ret;
}

int lx_dsp_get_mac(struct lx6464es *chip)
{
        u32 macmsb, maclsb;

        macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
        maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

        /* todo: endianness handling */
        chip->mac_address[5] = ((u8 *)(&maclsb))[0];
        chip->mac_address[4] = ((u8 *)(&maclsb))[1];
        chip->mac_address[3] = ((u8 *)(&maclsb))[2];
        chip->mac_address[2] = ((u8 *)(&macmsb))[0];
        chip->mac_address[1] = ((u8 *)(&macmsb))[1];
        chip->mac_address[0] = ((u8 *)(&macmsb))[2];

        return 0;
}


int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
        chip->rmh.cmd[0] |= gran;

        ret = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
        chip->rmh.stat_len = 9; /* we don't necessarily need the full length */

        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (!ret)
                memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

#define CSES_TIMEOUT        100     /* microseconds */
#define CSES_CE             0x0001
#define CSES_BROADCAST      0x0002
#define CSES_UPDATE_LDSV    0x0004

int lx_dsp_es_check_pipeline(struct lx6464es *chip)
{
        int i;

        for (i = 0; i != CSES_TIMEOUT; ++i) {
                /*
                 * The CSES_UPDATE_LDSV bit goes to 1 as soon as the macprog
                 * is ready.  It goes back to 0 once the first read has been
                 * done.  For now the test is left out because the bit only
                 * goes to 1 some 200 to 400 ms after the confES register has
                 * been written (kick of the xilinx ES).
                 *
                 * Only the CE bit is tested here.
                 */
                u32 cses = lx_dsp_reg_read(chip, eReg_CSES);

                if ((cses & CSES_CE) == 0)
                        return 0;

                udelay(1);
        }

        return -ETIMEDOUT;
}
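
/* PIPE_INFO_TO_CMD() packs the pipe number and the capture/playback flag into
 * the ID field of a command word (shifted by ID_OFFSET); every pipe and
 * stream command below starts by OR-ing this value into cmd[0]. */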
#define PIPE_INFO_TO_CMD(capture, pipe)                                 \
        ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)


/* low-level pipe handling */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
                     int channels)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= channels;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        if (err != 0)
                snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n");

        return err;
}

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
        if (size_array)
                memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

        *r_needed = 0;
        *r_freed = 0;

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (!err) {
                int i;
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        u32 stat = chip->rmh.stat[i];
                        if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
                                /* finished */
                                *r_freed += 1;
                                if (size_array)
                                        size_array[i] = stat & MASK_DATA_SIZE;
                        } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
                                   == 0)
                                /* free */
                                *r_needed += 1;
                }

#if 0
                snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                            *r_needed, *r_freed);
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        for (i = 0; i != chip->rmh.stat_len; ++i)
                                snd_printdd("  stat[%d]: %x, %x\n", i,
                                            chip->rmh.stat[i],
                                            chip->rmh.stat[i] & MASK_DATA_SIZE);
                }
#endif
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}
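
/* lx_pipe_start() and lx_pipe_pause() both rely on lx_pipe_toggle_state():
 * starting first waits for the pipe to reach the idle state, pausing first
 * waits for it to be running, and then the pipe state is toggled. */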
int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;

        err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err = 0;

        err = lx_pipe_wait_for_start(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}


int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
                         u64 *rsample_count)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.stat_len = 2; /* need all words here! */

        err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

        if (err != 0)
                snd_printk(KERN_ERR
                           "lx6464es: could not query pipe's sample count\n");
        else {
                *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                                  << 24)     /* hi part */
                        + chip->rmh.stat[1]; /* lo part */
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n");
        else
                *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
                                  int is_capture, u16 state)
{
        int i;

        /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
         * timeout 50 ms */
        for (i = 0; i != 50; ++i) {
                u16 current_state;
                int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

                if (err < 0)
                        return err;

                if (current_state == state)
                        return 0;

                mdelay(1);
        }

        return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}

/* low-level stream handling */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
                        int is_capture, enum stream_state_t state)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= state;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
                         u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        u32 channels = runtime->channels;

        if (runtime->channels != channels)
                snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d",
                           runtime->channels, channels);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

        chip->rmh.cmd[0] |= pipe_cmd;

        if (runtime->sample_bits == 16)
                /* 16 bit format */
                chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

        if (snd_pcm_format_little_endian(runtime->format))
                /* little endian/intel format */
                chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

        chip->rmh.cmd[0] |= channels-1;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
                    int *rstate)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
                              u64 *r_bytepos)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                      << 32)         /* hi part */
                + chip->rmh.stat[1]; /* lo part */

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

/* low-level buffer handling */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
                   u32 *r_buffer_index)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

        /* todo: pause request, circular buffer */

        chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
        chip->rmh.cmd[2] = buf_address_lo;

        if (buf_address_hi) {
                chip->rmh.cmd_len = 4;
                chip->rmh.cmd[3] = buf_address_hi;
                chip->rmh.cmd[0] |= BF_64BITS_ADR;
        }

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0) {
                *r_buffer_index = chip->rmh.stat[0];
                goto done;
        }

        if (err == EB_RBUFFERS_TABLE_OVERFLOW)
                snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

        if (err == EB_INVALID_STREAM)
                snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n");

        if (err == EB_CMD_REFUSED)
                snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n");

 done:
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 *r_buffer_size)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
                                             * microblaze will seek for it */

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0)
                *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
                     u32 buffer_index)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= buffer_index;

        err = lx_message_send_atomic(chip, &chip->rmh);

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

/* low-level gain/peak handling
 *
 * \todo: can we unmute capture/playback channels independently?
 *
 * */
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
        int err;
        unsigned long flags;

        /* bit set to 1: channel muted */
        u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

        chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

        chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);        /* hi part */
        chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */

        snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
                   chip->rmh.cmd[2]);

        err = lx_message_send_atomic(chip, &chip->rmh);

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}
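
/* peak_map[] translates the 4-bit peak codes returned by CMD_12_GET_PEAK (one
 * nibble per channel in stat[0], see lx_level_peaks() below) into 24-bit
 * linear amplitudes; the dB values in the comments give the corresponding
 * level below full scale. */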
static u32 peak_map[] = {
        0x00000109, /* -90.308dB */
        0x0000083B, /* -72.247dB */
        0x000020C4, /* -60.205dB */
        0x00008273, /* -48.030dB */
        0x00020756, /* -36.005dB */
        0x00040C37, /* -30.001dB */
        0x00081385, /* -24.002dB */
        0x00101D3F, /* -18.000dB */
        0x0016C310, /* -15.000dB */
        0x002026F2, /* -12.001dB */
        0x002D6A86, /* -9.000dB */
        0x004026E6, /* -6.004dB */
        0x005A9DF6, /* -3.000dB */
        0x0065AC8B, /* -2.000dB */
        0x00721481, /* -1.000dB */
        0x007FFFFF, /* FS */
};

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
                   u32 *r_levels)
{
        int err = 0;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&chip->msg_lock, flags);

        for (i = 0; i < channels; i += 4) {
                u32 s0, s1, s2, s3;

                lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
                chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

                err = lx_message_send_atomic(chip, &chip->rmh);

                if (err == 0) {
                        s0 = peak_map[chip->rmh.stat[0] & 0x0F];
                        s1 = peak_map[(chip->rmh.stat[0] >>  4) & 0xf];
                        s2 = peak_map[(chip->rmh.stat[0] >>  8) & 0xf];
                        s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
                } else
                        s0 = s1 = s2 = s3 = 0;

                r_levels[0] = s0;
                r_levels[1] = s1;
                r_levels[2] = s2;
                r_levels[3] = s3;

                r_levels += 4;
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

/* interrupt handling */
#define PCX_IRQ_NONE 0
#define IRQCS_ACTIVE_PCIDB      0x00002000L     /* Bit n° 13 */
#define IRQCS_ENABLE_PCIIRQ     0x00000100L     /* Bit n° 08 */
#define IRQCS_ENABLE_PCIDB      0x00000200L     /* Bit n° 09 */
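
/* lx_interrupt_test_ack() checks the PLX IRQCS register for an active PCI
 * doorbell interrupt and, if one is pending, reads and acknowledges the
 * doorbell register in a loop, returning the accumulated doorbell bits (the
 * interrupt source mask), or PCX_IRQ_NONE if this device did not raise the
 * interrupt. */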
static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
        u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* Test if PCI Doorbell interrupt is active */
        if (irqcs & IRQCS_ACTIVE_PCIDB) {
                u32 temp;
                irqcs = PCX_IRQ_NONE;

                while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
                        /* RAZ interrupt */
                        irqcs |= temp;
                        lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
                }

                return irqcs;
        }
        return PCX_IRQ_NONE;
}

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
                            int *r_async_pending, int *r_async_escmd)
{
        u32 irq_async;
        u32 irqsrc = lx_interrupt_test_ack(chip);

        if (irqsrc == PCX_IRQ_NONE)
                return 0;

        *r_irqsrc = irqsrc;

        irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
                                                     * (set by xilinx) + EOB */

        if (irq_async & MASK_SYS_STATUS_ESA) {
                irq_async &= ~MASK_SYS_STATUS_ESA;
                *r_async_escmd = 1;
        }

        if (irq_async) {
                /* snd_printd("interrupt: async event pending\n"); */
                *r_async_pending = 1;
        }

        return 1;
}

static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
                                            int *r_freq_changed,
                                            u64 *r_notified_in_pipe_mask,
                                            u64 *r_notified_out_pipe_mask)
{
        int err;
        u32 stat[9];            /* answer from CMD_04_GET_EVENT */

        /* We could optimize by not reading the empty events.
         * The response words come in the following order:
         * Stat[0]      general status word
         * Stat[1]      end of buffer OUT, high word
         * Stat[2]      end of buffer OUT, low word
         * Stat[3]      end of buffer IN, high word
         * Stat[4]      end of buffer IN, low word
         * Stat[5]      underrun, high word
         * Stat[6]      underrun, low word
         * Stat[7]      overrun, high word
         * Stat[8]      overrun, low word
         * */

        u64 orun_mask;
        u64 urun_mask;
#if 0
        int has_underrun   = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0;
        int has_overrun    = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0;
#endif
        int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
        int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

        *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

        err = lx_dsp_read_async_events(chip, stat);
        if (err < 0)
                return err;

        if (eb_pending_in) {
                *r_notified_in_pipe_mask = ((u64)stat[3] << 32)
                        + stat[4];
                snd_printdd(LXP "interrupt: EOBI pending %llx\n",
                            *r_notified_in_pipe_mask);
        }
        if (eb_pending_out) {
                *r_notified_out_pipe_mask = ((u64)stat[1] << 32)
                        + stat[2];
                snd_printdd(LXP "interrupt: EOBO pending %llx\n",
                            *r_notified_out_pipe_mask);
        }

        orun_mask = ((u64)stat[7] << 32) + stat[8];
        urun_mask = ((u64)stat[5] << 32) + stat[6];

        /* todo: handle xrun notification */

        return err;
}
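
/* lx_interrupt_request_new_buffer() queues the next period of the substream's
 * DMA buffer with the DSP: it computes the byte address of the current
 * period, splits it into low/high 32-bit halves with unpack_pointer(), hands
 * it to lx_buffer_give() on pipe 0 and advances the stream's frame_pos. */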
static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
{
        struct snd_pcm_substream *substream = lx_stream->stream;
        const unsigned int is_capture = lx_stream->is_capture;
        int err;
        unsigned long flags;

        const u32 channels = substream->runtime->channels;
        const u32 bytes_per_frame = channels * 3;
        const u32 period_size = substream->runtime->period_size;
        const u32 period_bytes = period_size * bytes_per_frame;
        const u32 pos = lx_stream->frame_pos;
        const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
                0 : pos + 1;

        dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
        u32 buf_hi = 0;
        u32 buf_lo = 0;
        u32 buffer_index = 0;

        u32 needed, freed;
        u32 size_array[MAX_STREAM_BUFFER];

        snd_printdd("->lx_interrupt_request_new_buffer\n");

        spin_lock_irqsave(&chip->lock, flags);

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
        snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed);

        unpack_pointer(buf, &buf_lo, &buf_hi);
        err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
                             &buffer_index);
        snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n",
                    buffer_index, (void *)buf, period_bytes);

        lx_stream->frame_pos = next_pos;
        spin_unlock_irqrestore(&chip->lock, flags);

        return err;
}

void lx_tasklet_playback(unsigned long data)
{
        struct lx6464es *chip = (struct lx6464es *)data;
        struct lx_stream *lx_stream = &chip->playback_stream;
        int err;

        snd_printdd("->lx_tasklet_playback\n");

        err = lx_interrupt_request_new_buffer(chip, lx_stream);
        if (err < 0)
                snd_printk(KERN_ERR LXP
                           "cannot request new buffer for playback\n");

        snd_pcm_period_elapsed(lx_stream->stream);
}

void lx_tasklet_capture(unsigned long data)
{
        struct lx6464es *chip = (struct lx6464es *)data;
        struct lx_stream *lx_stream = &chip->capture_stream;
        int err;

        snd_printdd("->lx_tasklet_capture\n");
        err = lx_interrupt_request_new_buffer(chip, lx_stream);
        if (err < 0)
                snd_printk(KERN_ERR LXP
                           "cannot request new buffer for capture\n");

        snd_pcm_period_elapsed(lx_stream->stream);
}

static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip,
                                              u64 notified_in_pipe_mask,
                                              u64 notified_out_pipe_mask)
{
        int err = 0;

        if (notified_in_pipe_mask) {
                snd_printdd(LXP "requesting audio transfer for capture\n");
                tasklet_hi_schedule(&chip->tasklet_capture);
        }

        if (notified_out_pipe_mask) {
                snd_printdd(LXP "requesting audio transfer for playback\n");
                tasklet_hi_schedule(&chip->tasklet_playback);
        }

        return err;
}

irqreturn_t lx_interrupt(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        int async_pending, async_escmd;
        u32 irqsrc;

        spin_lock(&chip->lock);

        snd_printdd("**************************************************\n");

        if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
                spin_unlock(&chip->lock);
                snd_printdd("IRQ_NONE\n");
                return IRQ_NONE; /* this device did not cause the interrupt */
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                goto exit;

#if 0
        if (irqsrc & MASK_SYS_STATUS_EOBI)
                snd_printdd(LXP "interrupt: EOBI\n");

        if (irqsrc & MASK_SYS_STATUS_EOBO)
                snd_printdd(LXP "interrupt: EOBO\n");

        if (irqsrc & MASK_SYS_STATUS_URUN)
                snd_printdd(LXP "interrupt: URUN\n");

        if (irqsrc & MASK_SYS_STATUS_ORUN)
                snd_printdd(LXP "interrupt: ORUN\n");
#endif

        if (async_pending) {
                u64 notified_in_pipe_mask = 0;
                u64 notified_out_pipe_mask = 0;
                int freq_changed;
                int err;

                /* handle async events */
                err = lx_interrupt_handle_async_events(chip, irqsrc,
                                                       &freq_changed,
                                                       &notified_in_pipe_mask,
                                                       &notified_out_pipe_mask);
                if (err)
                        snd_printk(KERN_ERR LXP
                                   "error handling async events\n");

                err = lx_interrupt_handle_audio_transfer(chip,
                                                         notified_in_pipe_mask,
                                                         notified_out_pipe_mask
                        );
                if (err)
                        snd_printk(KERN_ERR LXP
                                   "error during audio transfer\n");
        }

        if (async_escmd) {
#if 0
                /* backdoor for ethersound commands
                 *
                 * for now, we do not need this
                 *
                 * */
                snd_printdd("lx6464es: interrupt requests escmd handling\n");
#endif
        }

exit:
        spin_unlock(&chip->lock);
        return IRQ_HANDLED;     /* this device caused the interrupt */
}

static void lx_irq_set(struct lx6464es *chip, int enable)
{
        u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* enable/disable interrupts
         *
         * Set the Doorbell and PCI interrupt enable bits
         *
         * */
        if (enable)
                reg |=  (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        else
                reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);

        lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
        snd_printdd("->lx_irq_enable\n");
        lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
        snd_printdd("->lx_irq_disable\n");
        lx_irq_set(chip, 0);
}