lx_core.c

/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/* #define RMH_DEBUG 1 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */
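/*
 * The card exposes two register regions: the DSP/Xilinx registers,
 * remapped at chip->port_dsp_bar and addressed as 32-bit words (hence
 * the *4 in lx_dsp_register()), and the PLX bridge registers, remapped
 * at chip->port_plx_remapped and addressed by byte offset.
 */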
static const unsigned long dsp_port_offsets[] = {
        0,
        0x400,
        0x401,
        0x402,
        0x403,
        0x404,
        0x405,
        0x406,
        0x407,
        0x408,
        0x409,
        0x40a,
        0x40b,
        0x40c,
        0x410,
        0x411,
        0x412,
        0x413,
        0x414,
        0x415,
        0x416,
        0x420,
        0x430,
        0x431,
        0x432,
        0x433,
        0x434,
        0x440
};
static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_dsp_bar;
        return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_dsp_register(chip, port);
        return ioread32(address);
}

void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
{
        void __iomem *address = lx_dsp_register(chip, port);
        memcpy_fromio(data, address, len*sizeof(u32));
}

void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
        void __iomem *address = lx_dsp_register(chip, port);
        iowrite32(data, address);
}

void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
                         u32 len)
{
        void __iomem *address = lx_dsp_register(chip, port);
        memcpy_toio(address, data, len*sizeof(u32));
}

static const unsigned long plx_port_offsets[] = {
        0x04,
        0x40,
        0x44,
        0x48,
        0x4c,
        0x50,
        0x54,
        0x58,
        0x5c,
        0x64,
        0x68,
        0x6C
};

static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_plx_remapped;
        return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_plx_register(chip, port);
        return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
        void __iomem *address = lx_plx_register(chip, port);
        iowrite32(data, address);
}
u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr)
{
        int index;

        switch (mbox_nr) {
        case 1:
                index = ePLX_MBOX1; break;
        case 2:
                index = ePLX_MBOX2; break;
        case 3:
                index = ePLX_MBOX3; break;
        case 4:
                index = ePLX_MBOX4; break;
        case 5:
                index = ePLX_MBOX5; break;
        case 6:
                index = ePLX_MBOX6; break;
        case 7:
                index = ePLX_MBOX7; break;
        case 0: /* reserved for HF flags */
                snd_BUG();
        default:
                return 0xdeadbeef;
        }

        return lx_plx_reg_read(chip, index);
}

int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value)
{
        int index = -1;

        switch (mbox_nr) {
        case 1:
                index = ePLX_MBOX1; break;
        case 3:
                index = ePLX_MBOX3; break;
        case 4:
                index = ePLX_MBOX4; break;
        case 5:
                index = ePLX_MBOX5; break;
        case 6:
                index = ePLX_MBOX6; break;
        case 7:
                index = ePLX_MBOX7; break;
        case 0: /* reserved for HF flags */
        case 2: /* reserved for Pipe States
                 * the DSP keeps an image of it */
                snd_BUG();
                return -EBADRQC;
        }

        lx_plx_reg_write(chip, index, value);
        return 0;
}
/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR      0x00000002
#define Reg_CSM_MC      0x00000001

struct dsp_cmd_info {
        u32 dcCodeOp;       /* Op Code of the command (usually 1st 24-bits
                             * word). */
        u16 dcCmdLength;    /* Command length in words of 24 bits. */
        u16 dcStatusType;   /* Status type: 0 for fixed length, 1 for
                             * random. */
        u16 dcStatusLength; /* Status length (if fixed). */
        char *dcOpName;
};

/*
  Initialization and control data for the Microblaze interface
  - OpCode:
    the opcode field of the command set at the proper offset
  - CmdLength
    the number of command words
  - StatusType
    offset in the status registers: 0 means that the return value may be
    different from 0, and must be read
  - StatusLength
    the number of status words (in addition to the return value)
*/
static struct dsp_cmd_info dsp_commands[] =
{
        { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
          , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") },
        { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") },
        { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") },
        { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") },
        { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") },
        { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") },
        { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") },
        { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") },
        { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
          , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") },
        { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") },
        { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") },
        { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
          , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") },
        { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
          , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") },
        { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_MUTE") },
        { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") },
        { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
          , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") },
        { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") },
        { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") },
        { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /**/ , CMD_NAME("GET_PEAK") },
        { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") },
};
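/*
 * Each entry above pre-shifts the opcode into place, so cmd[0] only needs
 * the command arguments OR'ed in below OPCODE_OFFSET.  A typical caller in
 * this file looks like:
 *
 *      lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);
 *      chip->rmh.cmd[0] |= pipe_cmd;           (pipe id / capture flag)
 *      chip->rmh.cmd[0] |= channels;
 *      err = lx_message_send_atomic(chip, &chip->rmh);
 */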
static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
        snd_BUG_ON(cmd >= CMD_14_INVALID);

        rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
        rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
        rmh->stat_len = dsp_commands[cmd].dcStatusLength;
        rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
        rmh->cmd_idx = cmd;
        memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
        memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
        rmh->cmd_idx = cmd;
#endif
}

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
        u8 idx = rmh->cmd_idx;
        int i;

        snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

        for (i = 0; i != rmh->cmd_len; ++i)
                snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

        for (i = 0; i != rmh->stat_len; ++i)
                snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
        snd_printk("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif
/* lx_message_send() sleeps up to XILINX_TIMEOUT_MS milliseconds waiting for
 * the command-done interrupt; its fallback loop polls XILINX_POLL_ITERATIONS
 * times, sleeping 1 ms per iteration after the first XILINX_POLL_NO_SLEEP
 * busy polls.  lx_message_send_atomic() busy-waits up to
 * XILINX_TIMEOUT_MS * 1000 microseconds. */
#define XILINX_TIMEOUT_MS       40
#define XILINX_POLL_NO_SLEEP    100
#define XILINX_POLL_ITERATIONS  150
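/*
 * lx_message_send(): command handshake with the card's MicroBlaze.
 * As implemented below: refuse if eReg_CSM still shows a command in flight
 * (Reg_CSM_MC/Reg_CSM_MR), copy the command words to the eReg_CRM window,
 * kick the DSP by writing Reg_CSM_MC, then sleep until the doorbell
 * interrupt clears chip->send_message_locked (or, on timeout, poll
 * Reg_CSM_MR directly).  The return code is read from eReg_CRM1, any extra
 * status words from eReg_CRM2 onwards, and eReg_CSM is cleared afterwards.
 */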
static int lx_message_send(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;
        int answer_received;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        /* write command */
        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        snd_BUG_ON(atomic_read(&chip->send_message_locked) != 0);
        atomic_set(&chip->send_message_locked, 1);

        /* MicroBlaze, go! */
        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        /* wait for interrupt to answer */
        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS; ++dwloop) {
                answer_received = atomic_read(&chip->send_message_locked);
                if (answer_received == 0)
                        break;
                msleep(1);
        }

        if (answer_received == 0) {
                /* in debug mode, verify Reg_CSM_MR */
                snd_BUG_ON(!(lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR));

                /* command finished, read status */
                if (rmh->dsp_stat == 0)
                        reg = lx_dsp_reg_read(chip, eReg_CRM1);
                else
                        reg = 0;
        } else {
                int i;
                snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send! "
                           "Interrupts disabled?\n");

                /* wait for the Reg_CSM_MR bit */
                for (i = 0; i != XILINX_POLL_ITERATIONS; i++) {
                        if ((lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR)) {
                                if (rmh->dsp_stat == 0)
                                        reg = lx_dsp_reg_read(chip, eReg_CRM1);
                                else
                                        reg = 0;
                                goto polling_successful;
                        }

                        if (i > XILINX_POLL_NO_SLEEP)
                                msleep(1);
                }
                snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send! "
                           "polling failed\n");

polling_successful:
                atomic_set(&chip->send_message_locked, 0);
        }

        if ((reg & ERROR_VALUE) == 0) {
                /* read response */
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));

                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                snd_printk(KERN_WARNING LXP "lx_message_send: error_value %x\n",
                           reg);

        /* clear Reg_CSM_MR */
        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return 0;
}
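/*
 * lx_message_send_atomic(): same eReg_CSM/eReg_CRM handshake as above, but
 * instead of sleeping it busy-waits on Reg_CSM_MR with udelay(1), so it can
 * be called with chip->msg_lock held and interrupts off, as every caller in
 * this file does.  Note that it returns the raw DSP return code on success,
 * not 0.
 */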
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        /* write command */
        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        /* MicroBlaze, go! */
        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        /* wait for interrupt to answer */
        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
                if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
                        if (rmh->dsp_stat == 0)
                                reg = lx_dsp_reg_read(chip, eReg_CRM1);
                        else
                                reg = 0;
                        goto polling_successful;
                } else
                        udelay(1);
        }
        snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! "
                   "polling failed\n");

polling_successful:

        if ((reg & ERROR_VALUE) == 0) {
                /* read response */
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));

                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                snd_printk(LXP "rmh error: %08x\n", reg);

        /* clear Reg_CSM_MR */
        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                snd_printk(KERN_WARNING LXP
                           "lx_message_send_atomic: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                snd_printk(KERN_WARNING LXP
                           "lx_message_send_atomic: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return reg;
}
/* low-level dsp access */
int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
        u16 ret;
        unsigned long flags;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        *rdsp_version = chip->rmh.stat[1];
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}
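/*
 * lx_dsp_get_clock_frequency(): CMD_01_GET_SYS_CFG also reports the current
 * clock.  The code below extracts the frequency field of stat[0], masks it
 * with XES_FREQ_COUNT8_MASK, classifies it into the 44.1 kHz or 48 kHz
 * family (0 if it falls outside both ranges) and scales the result by
 * chip->freq_ratio.
 */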
int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
        u16 ret = 0;
        unsigned long flags;
        u32 freq_raw = 0;
        u32 freq = 0;
        u32 frequency = 0;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (ret == 0) {
                freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
                freq = freq_raw & XES_FREQ_COUNT8_MASK;

                if ((freq < XES_FREQ_COUNT8_48_MAX) ||
                    (freq > XES_FREQ_COUNT8_44_MIN))
                        frequency = 0; /* unknown */
                else if (freq >= XES_FREQ_COUNT8_44_MAX)
                        frequency = 44100;
                else
                        frequency = 48000;
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);

        *rfreq = frequency * chip->freq_ratio;

        return ret;
}
int lx_dsp_get_mac(struct lx6464es *chip, u8 *mac_address)
{
        u32 macmsb, maclsb;

        macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
        maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

        /* todo: endianness handling */
        mac_address[5] = ((u8 *)(&maclsb))[0];
        mac_address[4] = ((u8 *)(&maclsb))[1];
        mac_address[3] = ((u8 *)(&maclsb))[2];
        mac_address[2] = ((u8 *)(&macmsb))[0];
        mac_address[1] = ((u8 *)(&macmsb))[1];
        mac_address[0] = ((u8 *)(&macmsb))[2];

        return 0;
}
int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
        chip->rmh.cmd[0] |= gran;

        ret = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
        chip->rmh.stat_len = 9; /* we don't necessarily need the full length */

        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (!ret)
                memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

#define CSES_TIMEOUT     100    /* microseconds */
#define CSES_CE          0x0001
#define CSES_BROADCAST   0x0002
#define CSES_UPDATE_LDSV 0x0004
int lx_dsp_es_check_pipeline(struct lx6464es *chip)
{
        int i;

        for (i = 0; i != CSES_TIMEOUT; ++i) {
                /*
                 * The CSES_UPDATE_LDSV bit goes to 1 as soon as the macprog
                 * is ready.  It goes back to 0 once the first read has been
                 * done.  For now we drop this test, because the bit only
                 * goes to 1 some 200 to 400 ms after the confES register
                 * has been written (kick of the xilinx ES).
                 *
                 * We only test the CE bit.
                 */
                u32 cses = lx_dsp_reg_read(chip, eReg_CSES);

                if ((cses & CSES_CE) == 0)
                        return 0;

                udelay(1);
        }

        return -ETIMEDOUT;
}
#define PIPE_INFO_TO_CMD(capture, pipe) \
        ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
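/*
 * PIPE_INFO_TO_CMD() builds the pipe-id field of cmd[0]: the pipe number,
 * with ID_IS_CAPTURE set for capture pipes, shifted to ID_OFFSET.  All the
 * per-pipe and per-stream commands below OR this value into cmd[0].
 */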
/* low-level pipe handling */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
                     int channels)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= channels;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        if (err != 0)
                snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n");

        return err;
}

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}
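/*
 * lx_buffer_ask(): CMD_08_ASK_BUFFERS returns one status word per buffer
 * slot of the pipe (MAX_STREAM_BUFFER of them).  A slot with BF_EOB set has
 * been consumed and is counted in *r_freed (its byte count sits in the
 * MASK_DATA_SIZE bits); a slot without BF_VALID is empty and is counted in
 * *r_needed, i.e. it can be filled with lx_buffer_give().
 */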
int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
        if (size_array)
                memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

        *r_needed = 0;
        *r_freed = 0;

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (!err) {
                int i;
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        u32 stat = chip->rmh.stat[i];
                        if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
                                /* finished */
                                *r_freed += 1;
                                if (size_array)
                                        size_array[i] = stat & MASK_DATA_SIZE;
                        } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
                                   == 0)
                                /* free */
                                *r_needed += 1;
                }

#if 0
                snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                            *r_needed, *r_freed);
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        for (i = 0; i != chip->rmh.stat_len; ++i)
                                snd_printdd("  stat[%d]: %x, %x\n", i,
                                            chip->rmh.stat[i],
                                            chip->rmh.stat[i] & MASK_DATA_SIZE);
                }
#endif
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}
int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}
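/*
 * The DSP only exposes a single CMD_0B_TOGGLE_PIPE_STATE command, so
 * starting and pausing are both "wait until the pipe is in the opposite
 * state, then toggle": lx_pipe_start() waits for PSTATE_IDLE,
 * lx_pipe_pause() waits for PSTATE_RUN.
 */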
int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;

        err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err = 0;

        err = lx_pipe_wait_for_start(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
                         u64 *rsample_count)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.stat_len = 2; /* need all words here! */

        err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

        if (err != 0)
                snd_printk(KERN_ERR
                           "lx6464es: could not query pipe's sample count\n");
        else {
                *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                                  << 24)     /* hi part */
                        + chip->rmh.stat[1]; /* lo part */
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n");
        else
                *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
                                  int is_capture, u16 state)
{
        int i;

        /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
         * timeout 50 ms */
        for (i = 0; i != 50; ++i) {
                u16 current_state;
                int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

                if (err < 0)
                        return err;

                if (current_state == state)
                        return 0;

                mdelay(1);
        }

        return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}
/* low-level stream handling */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
                        int is_capture, enum stream_state_t state)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= state;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
                         u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        u32 channels = runtime->channels;

        if (runtime->channels != channels)
                snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d",
                           runtime->channels, channels);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

        chip->rmh.cmd[0] |= pipe_cmd;

        if (runtime->sample_bits == 16)
                /* 16 bit format */
                chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

        if (snd_pcm_format_little_endian(runtime->format))
                /* little endian/intel format */
                chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

        chip->rmh.cmd[0] |= channels-1;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
                    int *rstate)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
                              u64 *r_bytepos)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                      << 32)             /* hi part */
                + chip->rmh.stat[1];     /* lo part */

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}
/* low-level buffer handling */
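/*
 * lx_buffer_give(): CMD_0F_UPDATE_BUFFER hands one DMA buffer to the DSP.
 * cmd[1] carries the buffer size (limited to MASK_DATA_SIZE), cmd[2] the
 * low 32 bits of the bus address; for addresses with non-zero high bits,
 * cmd[3] holds them, BF_64BITS_ADR is set and cmd_len grows to 4.
 * BF_NOTIFY_EOB requests an end-of-buffer interrupt; the firmware's index
 * for the queued buffer comes back in stat[0].
 */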
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
                   u32 *r_buffer_index)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

        /* todo: pause request, circular buffer */

        chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
        chip->rmh.cmd[2] = buf_address_lo;

        if (buf_address_hi) {
                chip->rmh.cmd_len = 4;
                chip->rmh.cmd[3] = buf_address_hi;
                chip->rmh.cmd[0] |= BF_64BITS_ADR;
        }

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0) {
                *r_buffer_index = chip->rmh.stat[0];
                goto done;
        }

        if (err == EB_RBUFFERS_TABLE_OVERFLOW)
                snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

        if (err == EB_INVALID_STREAM)
                snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n");

        if (err == EB_CMD_REFUSED)
                snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n");

done:
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 *r_buffer_size)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
                                             * microblaze will seek for it */

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0)
                *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
                     u32 buffer_index)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= buffer_index;

        err = lx_message_send_atomic(chip, &chip->rmh);

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}
/* low-level gain/peak handling
 *
 * \todo: can we unmute capture/playback channels independently?
 *
 * */
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
        int err;
        unsigned long flags;

        /* bit set to 1: channel muted */
        u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

        chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

        chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);        /* hi part */
        chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */

        snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
                   chip->rmh.cmd[2]);

        err = lx_message_send_atomic(chip, &chip->rmh);

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}
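/*
 * peak_map[] translates the 4-bit peak codes returned by CMD_12_GET_PEAK
 * into 24-bit linear amplitudes (0x007FFFFF = full scale); the comments give
 * the corresponding levels in dB.  lx_level_peaks() below reads the peaks
 * four channels at a time, one nibble of stat[0] per channel.
 */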
static u32 peak_map[] = {
        0x00000109, /* -90.308dB */
        0x0000083B, /* -72.247dB */
        0x000020C4, /* -60.205dB */
        0x00008273, /* -48.030dB */
        0x00020756, /* -36.005dB */
        0x00040C37, /* -30.001dB */
        0x00081385, /* -24.002dB */
        0x00101D3F, /* -18.000dB */
        0x0016C310, /* -15.000dB */
        0x002026F2, /* -12.001dB */
        0x002D6A86, /* -9.000dB */
        0x004026E6, /* -6.004dB */
        0x005A9DF6, /* -3.000dB */
        0x0065AC8B, /* -2.000dB */
        0x00721481, /* -1.000dB */
        0x007FFFFF, /* FS */
};

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
                   u32 *r_levels)
{
        int err = 0;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&chip->msg_lock, flags);

        for (i = 0; i < channels; i += 4) {
                u32 s0, s1, s2, s3;

                lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
                chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

                err = lx_message_send_atomic(chip, &chip->rmh);

                if (err == 0) {
                        s0 = peak_map[chip->rmh.stat[0] & 0x0F];
                        s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
                        s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
                        s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
                } else
                        s0 = s1 = s2 = s3 = 0;

                r_levels[0] = s0;
                r_levels[1] = s1;
                r_levels[2] = s2;
                r_levels[3] = s3;

                r_levels += 4;
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}
/* interrupt handling */
#define PCX_IRQ_NONE 0

#define IRQCS_ACTIVE_PCIDB  0x00002000L /* bit 13 */
#define IRQCS_ENABLE_PCIIRQ 0x00000100L /* bit 08 */
#define IRQCS_ENABLE_PCIDB  0x00000200L /* bit 09 */
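/*
 * lx_interrupt_test_ack() reads the PLX IRQCS register; if the PCI doorbell
 * interrupt is active it drains ePLX_L2PCIDB (writing each value back
 * acknowledges it) and returns the OR of everything read as the interrupt
 * source mask, otherwise PCX_IRQ_NONE.  lx_interrupt_ack() then decodes
 * that mask: a CMD_DONE bit releases lx_message_send(), the async-event
 * bits are reported through *r_async_pending and *r_async_escmd.
 */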
static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
        u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* Test if PCI Doorbell interrupt is active */
        if (irqcs & IRQCS_ACTIVE_PCIDB) {
                u32 temp;
                irqcs = PCX_IRQ_NONE;

                while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
                        /* clear the interrupt */
                        irqcs |= temp;
                        lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
                }

                return irqcs;
        }
        return PCX_IRQ_NONE;
}

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
                            int *r_async_pending, int *r_async_escmd)
{
        u32 irq_async;
        u32 irqsrc = lx_interrupt_test_ack(chip);

        if (irqsrc == PCX_IRQ_NONE)
                return 0;

        *r_irqsrc = irqsrc;

        irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
                                                     * (set by xilinx) + EOB */

        if (irq_async & MASK_SYS_STATUS_ESA) {
                irq_async &= ~MASK_SYS_STATUS_ESA;
                *r_async_escmd = 1;
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                /* xilinx command notification */
                atomic_set(&chip->send_message_locked, 0);

        if (irq_async) {
                /* snd_printd("interrupt: async event pending\n"); */
                *r_async_pending = 1;
        }

        return 1;
}
static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
                                            int *r_freq_changed,
                                            u64 *r_notified_in_pipe_mask,
                                            u64 *r_notified_out_pipe_mask)
{
        int err;
        u32 stat[9];            /* answer from CMD_04_GET_EVENT */

        /* This could be optimized to skip reading empty events.
         * The response words come in the following order:
         * stat[0]      general status word
         * stat[1]      end of OUT buffer, high word
         * stat[2]      end of OUT buffer, low word
         * stat[3]      end of IN buffer, high word
         * stat[4]      end of IN buffer, low word
         * stat[5]      underrun, high word
         * stat[6]      underrun, low word
         * stat[7]      overrun, high word
         * stat[8]      overrun, low word
         * */
        u64 orun_mask;
        u64 urun_mask;
#if 0
        int has_underrun   = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0;
        int has_overrun    = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0;
#endif
        int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
        int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

        *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

        err = lx_dsp_read_async_events(chip, stat);
        if (err < 0)
                return err;

        if (eb_pending_in) {
                *r_notified_in_pipe_mask = ((u64)stat[3] << 32)
                        + stat[4];
                snd_printdd(LXP "interrupt: EOBI pending %llx\n",
                            *r_notified_in_pipe_mask);
        }

        if (eb_pending_out) {
                *r_notified_out_pipe_mask = ((u64)stat[1] << 32)
                        + stat[2];
                snd_printdd(LXP "interrupt: EOBO pending %llx\n",
                            *r_notified_out_pipe_mask);
        }

        orun_mask = ((u64)stat[7] << 32) + stat[8];
        urun_mask = ((u64)stat[5] << 32) + stat[6];

        /* todo: handle xrun notification */

        return err;
}
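/*
 * Buffer refill path: the driver feeds the DSP one ALSA period at a time.
 * Whenever an end-of-buffer event is notified, the tasklets below call
 * lx_interrupt_request_new_buffer(), which asks how many buffer slots are
 * free (lx_buffer_ask), posts the next period of the substream's DMA buffer
 * with lx_buffer_give() (3 bytes per channel per frame, as computed below),
 * advances frame_pos, and then reports the elapsed period to ALSA.
 */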
static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
{
        struct snd_pcm_substream *substream = lx_stream->stream;
        int is_capture = lx_stream->is_capture;
        int err;
        unsigned long flags;

        const u32 channels = substream->runtime->channels;
        const u32 bytes_per_frame = channels * 3;
        const u32 period_size = substream->runtime->period_size;
        const u32 period_bytes = period_size * bytes_per_frame;
        const u32 pos = lx_stream->frame_pos;
        const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
                0 : pos + 1;

        dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
        u32 buf_hi = 0;
        u32 buf_lo = 0;
        u32 buffer_index = 0;

        u32 needed, freed;
        u32 size_array[MAX_STREAM_BUFFER];

        snd_printdd("->lx_interrupt_request_new_buffer\n");

        spin_lock_irqsave(&chip->lock, flags);

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
        snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed);

        unpack_pointer(buf, &buf_lo, &buf_hi);
        err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
                             &buffer_index);
        snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n",
                    buffer_index, (void *)buf, period_bytes);

        lx_stream->frame_pos = next_pos;
        spin_unlock_irqrestore(&chip->lock, flags);

        return err;
}

void lx_tasklet_playback(unsigned long data)
{
        struct lx6464es *chip = (struct lx6464es *)data;
        struct lx_stream *lx_stream = &chip->playback_stream;
        int err;

        snd_printdd("->lx_tasklet_playback\n");

        err = lx_interrupt_request_new_buffer(chip, lx_stream);
        if (err < 0)
                snd_printk(KERN_ERR LXP
                           "cannot request new buffer for playback\n");

        snd_pcm_period_elapsed(lx_stream->stream);
}

void lx_tasklet_capture(unsigned long data)
{
        struct lx6464es *chip = (struct lx6464es *)data;
        struct lx_stream *lx_stream = &chip->capture_stream;
        int err;

        snd_printdd("->lx_tasklet_capture\n");

        err = lx_interrupt_request_new_buffer(chip, lx_stream);
        if (err < 0)
                snd_printk(KERN_ERR LXP
                           "cannot request new buffer for capture\n");

        snd_pcm_period_elapsed(lx_stream->stream);
}

static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip,
                                              u64 notified_in_pipe_mask,
                                              u64 notified_out_pipe_mask)
{
        int err = 0;

        if (notified_in_pipe_mask) {
                snd_printdd(LXP "requesting audio transfer for capture\n");
                tasklet_hi_schedule(&chip->tasklet_capture);
        }

        if (notified_out_pipe_mask) {
                snd_printdd(LXP "requesting audio transfer for playback\n");
                tasklet_hi_schedule(&chip->tasklet_playback);
        }

        return err;
}
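/*
 * Top-half interrupt handler: acknowledge the doorbell via
 * lx_interrupt_ack().  A plain CMD_DONE notification only unblocks
 * lx_message_send() and exits; async events are fetched with
 * CMD_04_GET_EVENT and turned into tasklet_hi_schedule() calls for the
 * capture/playback refill tasklets above.
 */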
irqreturn_t lx_interrupt(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        int async_pending, async_escmd;
        u32 irqsrc;

        spin_lock(&chip->lock);

        snd_printdd("**************************************************\n");

        if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
                spin_unlock(&chip->lock);
                snd_printdd("IRQ_NONE\n");
                return IRQ_NONE; /* this device did not cause the interrupt */
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                goto exit;

#if 0
        if (irqsrc & MASK_SYS_STATUS_EOBI)
                snd_printdd(LXP "interrupt: EOBI\n");

        if (irqsrc & MASK_SYS_STATUS_EOBO)
                snd_printdd(LXP "interrupt: EOBO\n");

        if (irqsrc & MASK_SYS_STATUS_URUN)
                snd_printdd(LXP "interrupt: URUN\n");

        if (irqsrc & MASK_SYS_STATUS_ORUN)
                snd_printdd(LXP "interrupt: ORUN\n");
#endif

        if (async_pending) {
                u64 notified_in_pipe_mask = 0;
                u64 notified_out_pipe_mask = 0;
                int freq_changed;
                int err;

                /* handle async events */
                err = lx_interrupt_handle_async_events(chip, irqsrc,
                                                       &freq_changed,
                                                       &notified_in_pipe_mask,
                                                       &notified_out_pipe_mask);
                if (err)
                        snd_printk(KERN_ERR LXP
                                   "error handling async events\n");

                err = lx_interrupt_handle_audio_transfer(chip,
                                                         notified_in_pipe_mask,
                                                         notified_out_pipe_mask);
                if (err)
                        snd_printk(KERN_ERR LXP
                                   "error during audio transfer\n");
        }

        if (async_escmd) {
#if 0
                /* backdoor for ethersound commands
                 *
                 * for now, we do not need this
                 *
                 * */
                snd_printdd("lx6464es: interrupt requests escmd handling\n");
#endif
        }

exit:
        spin_unlock(&chip->lock);
        return IRQ_HANDLED;     /* this device caused the interrupt */
}
static void lx_irq_set(struct lx6464es *chip, int enable)
{
        u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* enable/disable interrupts
         *
         * Set the Doorbell and PCI interrupt enable bits
         *
         * */
        if (enable)
                reg |=  (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        else
                reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);

        lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
        snd_printdd("->lx_irq_enable\n");
        lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
        snd_printdd("->lx_irq_disable\n");
        lx_irq_set(chip, 0);
}