/*
 * soc-cache.c -- ASoC register cache helpers
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <sound/soc.h>
#include <linux/lzo.h>
#include <linux/bitmap.h>
#include <linux/rbtree.h>

#include <trace/events/asoc.h>

#if defined(CONFIG_SPI_MASTER)
static int do_spi_write(void *control_data, const void *msg,
			int len)
{
	struct spi_device *spi = control_data;
	struct spi_transfer t;
	struct spi_message m;

	if (len <= 0)
		return 0;

	spi_message_init(&m);
	memset(&t, 0, sizeof t);

	t.tx_buf = msg;
	t.len = len;

	spi_message_add_tail(&t, &m);
	spi_sync(spi, &m);

	return len;
}
#endif

static int do_hw_write(struct snd_soc_codec *codec, unsigned int reg,
		       unsigned int value, const void *data, int len)
{
	int ret;

	if (!snd_soc_codec_volatile_register(codec, reg) &&
	    reg < codec->driver->reg_cache_size &&
	    !codec->cache_bypass) {
		ret = snd_soc_cache_write(codec, reg, value);
		if (ret < 0)
			return -1;
	}

	if (codec->cache_only) {
		codec->cache_sync = 1;
		return 0;
	}

	ret = codec->hw_write(codec->control_data, data, len);
	if (ret == len)
		return 0;
	if (ret < 0)
		return ret;
	else
		return -EIO;
}
static unsigned int do_hw_read(struct snd_soc_codec *codec, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (reg >= codec->driver->reg_cache_size ||
	    snd_soc_codec_volatile_register(codec, reg) ||
	    codec->cache_bypass) {
		if (codec->cache_only)
			return -1;

		BUG_ON(!codec->hw_read);
		return codec->hw_read(codec, reg);
	}

	ret = snd_soc_cache_read(codec, reg, &val);
	if (ret < 0)
		return -1;
	return val;
}

static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	u8 data[2];

	data[0] = (reg << 4) | ((value >> 8) & 0x000f);
	data[1] = value & 0x00ff;

	return do_hw_write(codec, reg, value, data, 2);
}

#if defined(CONFIG_SPI_MASTER)
static int snd_soc_4_12_spi_write(void *control_data, const char *data,
				  int len)
{
	u8 msg[2];

	msg[0] = data[1];
	msg[1] = data[0];

	return do_spi_write(control_data, msg, len);
}
#else
#define snd_soc_4_12_spi_write NULL
#endif

static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
				     unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
			     unsigned int value)
{
	u16 data;

	data = cpu_to_be16((reg << 9) | (value & 0x1ff));

	return do_hw_write(codec, reg, value, &data, 2);
}

#if defined(CONFIG_SPI_MASTER)
static int snd_soc_7_9_spi_write(void *control_data, const char *data,
				 int len)
{
	u8 msg[2];

	msg[0] = data[0];
	msg[1] = data[1];

	return do_spi_write(control_data, msg, len);
}
#else
#define snd_soc_7_9_spi_write NULL
#endif

static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
			     unsigned int value)
{
	u8 data[2];

	reg &= 0xff;
	data[0] = reg;
	data[1] = value & 0xff;

	return do_hw_write(codec, reg, value, data, 2);
}

static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
				     unsigned int reg)
{
	return do_hw_read(codec, reg);
}

#if defined(CONFIG_SPI_MASTER)
static int snd_soc_8_8_spi_write(void *control_data, const char *data,
				 int len)
{
	u8 msg[2];

	msg[0] = data[0];
	msg[1] = data[1];

	return do_spi_write(control_data, msg, len);
}
#else
#define snd_soc_8_8_spi_write NULL
#endif

static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	u8 data[3];
	u16 val = cpu_to_be16(value);

	data[0] = reg;
	memcpy(&data[1], &val, sizeof(val));

	return do_hw_write(codec, reg, value, data, 3);
}

static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return do_hw_read(codec, reg);
}

#if defined(CONFIG_SPI_MASTER)
static int snd_soc_8_16_spi_write(void *control_data, const char *data,
				  int len)
{
	u8 msg[3];

	msg[0] = data[0];
	msg[1] = data[1];
	msg[2] = data[2];

	return do_spi_write(control_data, msg, len);
}
#else
#define snd_soc_8_16_spi_write NULL
#endif
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int do_i2c_read(struct snd_soc_codec *codec,
				void *reg, int reglen,
				void *data, int datalen)
{
	struct i2c_msg xfer[2];
	int ret;
	struct i2c_client *client = codec->control_data;

	/* Write register */
	xfer[0].addr = client->addr;
	xfer[0].flags = 0;
	xfer[0].len = reglen;
	xfer[0].buf = reg;

	/* Read data */
	xfer[1].addr = client->addr;
	xfer[1].flags = I2C_M_RD;
	xfer[1].len = datalen;
	xfer[1].buf = data;

	ret = i2c_transfer(client->adapter, xfer, 2);
	if (ret == 2)
		return 0;
	else if (ret < 0)
		return ret;
	else
		return -EIO;
}
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_8_8_read_i2c(struct snd_soc_codec *codec,
					 unsigned int r)
{
	u8 reg = r;
	u8 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 1, &data, 1);
	if (ret < 0)
		return 0;
	return data;
}
#else
#define snd_soc_8_8_read_i2c NULL
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_8_16_read_i2c(struct snd_soc_codec *codec,
					  unsigned int r)
{
	u8 reg = r;
	u16 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 1, &data, 2);
	if (ret < 0)
		return 0;
	return (data >> 8) | ((data & 0xff) << 8);
}
#else
#define snd_soc_8_16_read_i2c NULL
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
					  unsigned int r)
{
	u16 reg = r;
	u8 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 2, &data, 1);
	if (ret < 0)
		return 0;
	return data;
}
#else
#define snd_soc_16_8_read_i2c NULL
#endif

static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	u8 data[3];

	data[0] = (reg >> 8) & 0xff;
	data[1] = reg & 0xff;
	data[2] = value;

	reg &= 0xff;

	return do_hw_write(codec, reg, value, data, 3);
}

#if defined(CONFIG_SPI_MASTER)
static int snd_soc_16_8_spi_write(void *control_data, const char *data,
				  int len)
{
	u8 msg[3];

	msg[0] = data[0];
	msg[1] = data[1];
	msg[2] = data[2];

	return do_spi_write(control_data, msg, len);
}
#else
#define snd_soc_16_8_spi_write NULL
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_16_16_read_i2c(struct snd_soc_codec *codec,
					   unsigned int r)
{
	u16 reg = cpu_to_be16(r);
	u16 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 2, &data, 2);
	if (ret < 0)
		return 0;
	return be16_to_cpu(data);
}
#else
#define snd_soc_16_16_read_i2c NULL
#endif

static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
				       unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
			       unsigned int value)
{
	u16 data[2];

	data[0] = cpu_to_be16(reg);
	data[1] = cpu_to_be16(value);

	return do_hw_write(codec, reg, value, data, sizeof(data));
}

#if defined(CONFIG_SPI_MASTER)
static int snd_soc_16_16_spi_write(void *control_data, const char *data,
				   int len)
{
	u8 msg[4];

	msg[0] = data[0];
	msg[1] = data[1];
	msg[2] = data[2];
	msg[3] = data[3];

	return do_spi_write(control_data, msg, len);
}
#else
#define snd_soc_16_16_spi_write NULL
#endif
/* Primitive bulk write support for soc-cache. The data pointed to by
 * `data' needs to already be in the form the hardware expects
 * including any leading register specific data. Any data written
 * through this function will not go through the cache as it only
 * handles writing to volatile or out of bounds registers.
 */
static int snd_soc_hw_bulk_write_raw(struct snd_soc_codec *codec, unsigned int reg,
				     const void *data, size_t len)
{
	int ret;

	/* To ensure that we don't get out of sync with the cache, check
	 * whether the base register is volatile or if we've directly asked
	 * to bypass the cache. Out of bounds registers are considered
	 * volatile.
	 */
	if (!codec->cache_bypass
	    && !snd_soc_codec_volatile_register(codec, reg)
	    && reg < codec->driver->reg_cache_size)
		return -EINVAL;

	switch (codec->control_type) {
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
	case SND_SOC_I2C:
		ret = i2c_master_send(codec->control_data, data, len);
		break;
#endif
#if defined(CONFIG_SPI_MASTER)
	case SND_SOC_SPI:
		ret = do_spi_write(codec->control_data, data, len);
		break;
#endif
	default:
		BUG();
	}

	if (ret == len)
		return 0;

	if (ret < 0)
		return ret;
	else
		return -EIO;
}
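
/*
 * Illustrative usage sketch (not part of the original file): drivers reach
 * this primitive through the codec->bulk_write_raw hook installed by
 * snd_soc_codec_set_cache_io() below. The buffer must already be in wire
 * format, i.e. it starts with the register address; the register number
 * and payload here are made-up example values.
 *
 *	static int example_download(struct snd_soc_codec *codec)
 *	{
 *		// 16-bit register 0x100 followed by two 16-bit data words
 *		static const u8 buf[] = { 0x01, 0x00, 0x12, 0x34, 0x56, 0x78 };
 *
 *		return codec->bulk_write_raw(codec, 0x100, buf, sizeof(buf));
 *	}
 */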
static struct {
	int addr_bits;
	int data_bits;
	int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int);
	int (*spi_write)(void *, const char *, int);
	unsigned int (*read)(struct snd_soc_codec *, unsigned int);
	unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int);
} io_types[] = {
	{
		.addr_bits = 4, .data_bits = 12,
		.write = snd_soc_4_12_write, .read = snd_soc_4_12_read,
		.spi_write = snd_soc_4_12_spi_write,
	},
	{
		.addr_bits = 7, .data_bits = 9,
		.write = snd_soc_7_9_write, .read = snd_soc_7_9_read,
		.spi_write = snd_soc_7_9_spi_write,
	},
	{
		.addr_bits = 8, .data_bits = 8,
		.write = snd_soc_8_8_write, .read = snd_soc_8_8_read,
		.i2c_read = snd_soc_8_8_read_i2c,
		.spi_write = snd_soc_8_8_spi_write,
	},
	{
		.addr_bits = 8, .data_bits = 16,
		.write = snd_soc_8_16_write, .read = snd_soc_8_16_read,
		.i2c_read = snd_soc_8_16_read_i2c,
		.spi_write = snd_soc_8_16_spi_write,
	},
	{
		.addr_bits = 16, .data_bits = 8,
		.write = snd_soc_16_8_write, .read = snd_soc_16_8_read,
		.i2c_read = snd_soc_16_8_read_i2c,
		.spi_write = snd_soc_16_8_spi_write,
	},
	{
		.addr_bits = 16, .data_bits = 16,
		.write = snd_soc_16_16_write, .read = snd_soc_16_16_read,
		.i2c_read = snd_soc_16_16_read_i2c,
		.spi_write = snd_soc_16_16_spi_write,
	},
};
/**
 * snd_soc_codec_set_cache_io: Set up standard I/O functions.
 *
 * @codec: CODEC to configure.
 * @addr_bits: Number of bits of register address data.
 * @data_bits: Number of bits of data per register.
 * @control: Control bus used.
 *
 * Register formats are frequently shared between many I2C and SPI
 * devices. In order to promote code reuse the ASoC core provides
 * some standard implementations of CODEC read and write operations
 * which can be set up using this function.
 *
 * The caller is responsible for allocating and initialising the
 * actual cache.
 *
 * Note that at present this code cannot be used by CODECs with
 * volatile registers.
 */
int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
			       int addr_bits, int data_bits,
			       enum snd_soc_control_type control)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(io_types); i++)
		if (io_types[i].addr_bits == addr_bits &&
		    io_types[i].data_bits == data_bits)
			break;
	if (i == ARRAY_SIZE(io_types)) {
		printk(KERN_ERR
		       "No I/O functions for %d bit address %d bit data\n",
		       addr_bits, data_bits);
		return -EINVAL;
	}

	codec->write = io_types[i].write;
	codec->read = io_types[i].read;
	codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;

	switch (control) {
	case SND_SOC_CUSTOM:
		break;

	case SND_SOC_I2C:
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
		codec->hw_write = (hw_write_t)i2c_master_send;
#endif
		if (io_types[i].i2c_read)
			codec->hw_read = io_types[i].i2c_read;

		codec->control_data = container_of(codec->dev,
						   struct i2c_client,
						   dev);
		break;

	case SND_SOC_SPI:
		if (io_types[i].spi_write)
			codec->hw_write = io_types[i].spi_write;

		codec->control_data = container_of(codec->dev,
						   struct spi_device,
						   dev);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
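
/*
 * Illustrative usage sketch (not part of the original file): a codec
 * driver's probe() callback selects one of the standard register layouts
 * above. The function name and the 7-bit address / 9-bit data format are
 * assumptions chosen purely for the example.
 *
 *	static int example_codec_probe(struct snd_soc_codec *codec)
 *	{
 *		int ret;
 *
 *		ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
 *		if (ret < 0) {
 *			dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */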
static bool snd_soc_set_cache_val(void *base, unsigned int idx,
				  unsigned int val, unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		u8 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	return false;
}

static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
					  unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}
struct snd_soc_rbtree_node {
	struct rb_node node; /* the actual rbtree node holding this block */
	unsigned int base_reg; /* base register handled by this block */
	unsigned int word_size; /* number of bytes needed to represent the register index */
	void *block; /* block of adjacent registers */
	unsigned int blklen; /* number of registers available in the block */
} __attribute__ ((packed));

struct snd_soc_rbtree_ctx {
	struct rb_root root;
	struct snd_soc_rbtree_node *cached_rbnode;
};
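
/*
 * Worked example (not part of the original file): if registers 0x04-0x07
 * have all been written, they can live in one node with base_reg = 0x04,
 * blklen = 4 and a block[] of four word_size-sized entries. Register 0x06
 * is then entry 0x06 - base_reg = 2 in the block, and the node's top
 * register is base_reg + blklen - 1 = 0x07, exactly as computed by
 * snd_soc_rbtree_get_base_top_reg() below.
 */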
static inline void snd_soc_rbtree_get_base_top_reg(
	struct snd_soc_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + rbnode->blklen - 1;
}

static unsigned int snd_soc_rbtree_get_register(
	struct snd_soc_rbtree_node *rbnode, unsigned int idx)
{
	unsigned int val;

	switch (rbnode->word_size) {
	case 1: {
		u8 *p = rbnode->block;
		val = p[idx];
		return val;
	}
	case 2: {
		u16 *p = rbnode->block;
		val = p[idx];
		return val;
	}
	default:
		BUG();
		break;
	}
	return -1;
}

static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
					unsigned int idx, unsigned int val)
{
	switch (rbnode->word_size) {
	case 1: {
		u8 *p = rbnode->block;
		p[idx] = val;
		break;
	}
	case 2: {
		u16 *p = rbnode->block;
		p[idx] = val;
		break;
	}
	default:
		BUG();
		break;
	}
}

static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
	struct rb_root *root, unsigned int reg)
{
	struct rb_node *node;
	struct snd_soc_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	node = root->rb_node;
	while (node) {
		rbnode = container_of(node, struct snd_soc_rbtree_node, node);
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
		else if (reg > top_reg)
			node = node->rb_right;
		else if (reg < base_reg)
			node = node->rb_left;
	}

	return NULL;
}

static int snd_soc_rbtree_insert(struct rb_root *root,
				 struct snd_soc_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct snd_soc_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
						&top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct snd_soc_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i;

	rbtree_ctx = codec->reg_cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
		for (i = 0; i < rbnode->blklen; ++i) {
			regtmp = rbnode->base_reg + i;
			WARN_ON(codec->writable_register &&
				codec->writable_register(codec, regtmp));
			val = snd_soc_rbtree_get_register(rbnode, i);
			codec->cache_bypass = 1;
			ret = snd_soc_write(codec, regtmp, val);
			codec->cache_bypass = 0;
			if (ret)
				return ret;
			dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
				regtmp, val);
		}
	}

	return 0;
}
static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
					  unsigned int pos, unsigned int reg,
					  unsigned int value)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * rbnode->word_size,
		blk + pos * rbnode->word_size,
		(rbnode->blklen - pos) * rbnode->word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	snd_soc_rbtree_set_register(rbnode, pos, value);
	return 0;
}

static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
				      unsigned int reg, unsigned int value)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int base_reg, top_reg;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = codec->reg_cache;
	/* look up the required register in the cached rbnode */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			reg_tmp = reg - base_reg;
			val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
			if (val == value)
				return 0;
			snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
			return 0;
		}
	}
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
		if (val == value)
			return 0;
		snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
		rbtree_ctx->cached_rbnode = rbnode;
	} else {
		/* bail out early, no need to create the rbnode yet */
		if (!value)
			return 0;
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
			for (i = 0; i < rbnode_tmp->blklen; ++i) {
				reg_tmp = rbnode_tmp->base_reg + i;
				if (abs(reg_tmp - reg) != 1)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + 1 == reg)
					pos = i + 1;
				else
					pos = i;
				ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
								     reg, value);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->word_size = codec->driver->reg_word_size;
		rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		snd_soc_rbtree_set_register(rbnode, 0, value);
		snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
				     unsigned int reg, unsigned int *value)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int reg_tmp;

	rbtree_ctx = codec->reg_cache;
	/* look up the required register in the cached rbnode */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			reg_tmp = reg - base_reg;
			*value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
			return 0;
		}
	}
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		*value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
		rbtree_ctx->cached_rbnode = rbnode;
	} else {
		/* uninitialized registers default to 0 */
		*value = 0;
	}

	return 0;
}

static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
{
	struct rb_node *next;
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = codec->reg_cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(codec->reg_cache);
	codec->reg_cache = NULL;

	return 0;
}

static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	unsigned int word_size;
	unsigned int val;
	int i;
	int ret;

	codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!codec->reg_cache)
		return -ENOMEM;

	rbtree_ctx = codec->reg_cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	if (!codec->reg_def_copy)
		return 0;

	word_size = codec->driver->reg_word_size;
	for (i = 0; i < codec->driver->reg_cache_size; ++i) {
		val = snd_soc_get_cache_val(codec->reg_def_copy, i,
					    word_size);
		if (!val)
			continue;
		ret = snd_soc_rbtree_cache_write(codec, i, val);
		if (ret)
			goto err;
	}

	return 0;

err:
	snd_soc_cache_exit(codec);
	return ret;
}
#ifdef CONFIG_SND_SOC_CACHE_LZO
struct snd_soc_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};

#define LZO_BLOCK_NUM 8
static int snd_soc_lzo_block_count(void)
{
	return LZO_BLOCK_NUM;
}

static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}

static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}

static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}

static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
					    struct snd_soc_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = snd_soc_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
					      struct snd_soc_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = snd_soc_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
					   unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return (reg * codec_drv->reg_word_size) /
	       DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}

static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
					 unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
		      codec_drv->reg_word_size);
}

static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}

static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = codec->reg_cache;
	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
		WARN_ON(codec->writable_register &&
			codec->writable_register(codec, i));
		ret = snd_soc_cache_read(codec, i, &val);
		if (ret)
			return ret;
		codec->cache_bypass = 1;
		ret = snd_soc_write(codec, i, val);
		codec->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
			i, val);
	}

	return 0;
}

static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
				   unsigned int reg, unsigned int value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
				  codec->driver->reg_word_size)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}

static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
				  unsigned int reg, unsigned int *value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	*value = 0;
	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
					       codec->driver->reg_word_size);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return 0;
}

static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = codec->reg_cache;
	if (!lzo_blocks)
		return 0;

	blkcount = snd_soc_lzo_block_count();
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks. Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		kfree(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; ++i) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	codec->reg_cache = NULL;

	return 0;
}

static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	const struct snd_soc_codec_driver *codec_drv;
	int ret, tofree, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;
	codec_drv = codec->driver;

	/*
	 * If we have not been given a default register cache
	 * then allocate a dummy zero-ed out region, compress it
	 * and remember to free it afterwards.
	 */
	tofree = 0;
	if (!codec->reg_def_copy)
		tofree = 1;

	if (!codec->reg_def_copy) {
		codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
		if (!codec->reg_def_copy)
			return -ENOMEM;
	}

	blkcount = snd_soc_lzo_block_count();
	codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
				   GFP_KERNEL);
	if (!codec->reg_cache) {
		ret = -ENOMEM;
		goto err_tofree;
	}
	lzo_blocks = codec->reg_cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware. Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = codec_drv->reg_cache_size;
	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
			   GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}
	bitmap_zero(sync_bmp, bmp_size);

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; ++i) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			kfree(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = snd_soc_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = snd_soc_lzo_get_blksize(codec);
	p = codec->reg_def_copy;
	end = codec->reg_def_copy + codec->reg_size;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; ++i, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = snd_soc_lzo_compress_cache_block(codec,
						       lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	if (tofree) {
		kfree(codec->reg_def_copy);
		codec->reg_def_copy = NULL;
	}
	return 0;
err:
	snd_soc_cache_exit(codec);
err_tofree:
	if (tofree) {
		kfree(codec->reg_def_copy);
		codec->reg_def_copy = NULL;
	}
	return ret;
}
#endif
static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
{
	int i;
	int ret;
	const struct snd_soc_codec_driver *codec_drv;
	unsigned int val;

	codec_drv = codec->driver;
	for (i = 0; i < codec_drv->reg_cache_size; ++i) {
		WARN_ON(codec->writable_register &&
			codec->writable_register(codec, i));
		ret = snd_soc_cache_read(codec, i, &val);
		if (ret)
			return ret;
		if (codec->reg_def_copy)
			if (snd_soc_get_cache_val(codec->reg_def_copy,
						  i, codec_drv->reg_word_size) == val)
				continue;
		ret = snd_soc_write(codec, i, val);
		if (ret)
			return ret;
		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
			i, val);
	}
	return 0;
}

static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
				    unsigned int reg, unsigned int value)
{
	snd_soc_set_cache_val(codec->reg_cache, reg, value,
			      codec->driver->reg_word_size);
	return 0;
}

static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
				   unsigned int reg, unsigned int *value)
{
	*value = snd_soc_get_cache_val(codec->reg_cache, reg,
				       codec->driver->reg_word_size);
	return 0;
}

static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
{
	if (!codec->reg_cache)
		return 0;
	kfree(codec->reg_cache);
	codec->reg_cache = NULL;
	return 0;
}

static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;

	if (codec->reg_def_copy)
		codec->reg_cache = kmemdup(codec->reg_def_copy,
					   codec->reg_size, GFP_KERNEL);
	else
		codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
	if (!codec->reg_cache)
		return -ENOMEM;

	return 0;
}

/* an array of all supported compression types */
static const struct snd_soc_cache_ops cache_types[] = {
	/* Flat *must* be the first entry for fallback */
	{
		.id = SND_SOC_FLAT_COMPRESSION,
		.name = "flat",
		.init = snd_soc_flat_cache_init,
		.exit = snd_soc_flat_cache_exit,
		.read = snd_soc_flat_cache_read,
		.write = snd_soc_flat_cache_write,
		.sync = snd_soc_flat_cache_sync
	},
#ifdef CONFIG_SND_SOC_CACHE_LZO
	{
		.id = SND_SOC_LZO_COMPRESSION,
		.name = "LZO",
		.init = snd_soc_lzo_cache_init,
		.exit = snd_soc_lzo_cache_exit,
		.read = snd_soc_lzo_cache_read,
		.write = snd_soc_lzo_cache_write,
		.sync = snd_soc_lzo_cache_sync
	},
#endif
	{
		.id = SND_SOC_RBTREE_COMPRESSION,
		.name = "rbtree",
		.init = snd_soc_rbtree_cache_init,
		.exit = snd_soc_rbtree_cache_exit,
		.read = snd_soc_rbtree_cache_read,
		.write = snd_soc_rbtree_cache_write,
		.sync = snd_soc_rbtree_cache_sync
	}
};
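
/*
 * Illustrative sketch (not part of the original file): the compression
 * type is chosen by the codec driver, which at the time typically meant
 * setting a compress_type field in its snd_soc_codec_driver (field name
 * assumed here for the example):
 *
 *	static struct snd_soc_codec_driver example_codec = {
 *		.compress_type = SND_SOC_RBTREE_COMPRESSION,
 *		.reg_cache_size = 0x100,
 *		.reg_word_size = 2,
 *	};
 *
 * snd_soc_cache_init() below matches codec->compress_type against the
 * table above and falls back to the flat cache if nothing matches.
 */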
int snd_soc_cache_init(struct snd_soc_codec *codec)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
		if (cache_types[i].id == codec->compress_type)
			break;

	/* Fall back to flat compression */
	if (i == ARRAY_SIZE(cache_types)) {
		dev_warn(codec->dev, "Could not match compress type: %d\n",
			 codec->compress_type);
		i = 0;
	}

	mutex_init(&codec->cache_rw_mutex);
	codec->cache_ops = &cache_types[i];

	if (codec->cache_ops->init) {
		if (codec->cache_ops->name)
			dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
				codec->cache_ops->name, codec->name);
		return codec->cache_ops->init(codec);
	}
	return -ENOSYS;
}

/*
 * NOTE: keep in mind that this function might be called
 * multiple times.
 */
int snd_soc_cache_exit(struct snd_soc_codec *codec)
{
	if (codec->cache_ops && codec->cache_ops->exit) {
		if (codec->cache_ops->name)
			dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
				codec->cache_ops->name, codec->name);
		return codec->cache_ops->exit(codec);
	}
	return -ENOSYS;
}

/**
 * snd_soc_cache_read: Fetch the value of a given register from the cache.
 *
 * @codec: CODEC to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 */
int snd_soc_cache_read(struct snd_soc_codec *codec,
		       unsigned int reg, unsigned int *value)
{
	int ret;

	mutex_lock(&codec->cache_rw_mutex);

	if (value && codec->cache_ops && codec->cache_ops->read) {
		ret = codec->cache_ops->read(codec, reg, value);
		mutex_unlock(&codec->cache_rw_mutex);
		return ret;
	}

	mutex_unlock(&codec->cache_rw_mutex);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_read);

/**
 * snd_soc_cache_write: Set the value of a given register in the cache.
 *
 * @codec: CODEC to configure.
 * @reg: The register index.
 * @value: The new register value.
 */
int snd_soc_cache_write(struct snd_soc_codec *codec,
			unsigned int reg, unsigned int value)
{
	int ret;

	mutex_lock(&codec->cache_rw_mutex);

	if (codec->cache_ops && codec->cache_ops->write) {
		ret = codec->cache_ops->write(codec, reg, value);
		mutex_unlock(&codec->cache_rw_mutex);
		return ret;
	}

	mutex_unlock(&codec->cache_rw_mutex);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_write);
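
/*
 * Illustrative sketch (not part of the original file): the cached I/O
 * paths above (do_hw_read()/do_hw_write()) funnel through
 * snd_soc_cache_read() and snd_soc_cache_write(). A driver can also call
 * them directly, e.g. to patch a cached default before a sync; register
 * 0x1a and the bit mask below are made-up example values.
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = snd_soc_cache_read(codec, 0x1a, &val);
 *	if (ret == 0)
 *		ret = snd_soc_cache_write(codec, 0x1a, val | 0x0100);
 */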
/**
 * snd_soc_cache_sync: Sync the register cache with the hardware.
 *
 * @codec: CODEC to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 */
int snd_soc_cache_sync(struct snd_soc_codec *codec)
{
	int ret;
	const char *name;

	if (!codec->cache_sync) {
		return 0;
	}

	if (!codec->cache_ops || !codec->cache_ops->sync)
		return -ENOSYS;

	if (codec->cache_ops->name)
		name = codec->cache_ops->name;
	else
		name = "unknown";

	if (codec->cache_ops->name)
		dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
			codec->cache_ops->name, codec->name);
	trace_snd_soc_cache_sync(codec, name, "start");
	ret = codec->cache_ops->sync(codec);
	if (!ret)
		codec->cache_sync = 0;
	trace_snd_soc_cache_sync(codec, name, "end");
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
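
/*
 * Illustrative sketch (not part of the original file): a typical resume
 * path re-enables hardware writes and replays the cache. Writes issued
 * while cache_only is set are captured by do_hw_write() above, which sets
 * cache_sync so that the replay actually happens. The function name is an
 * assumption for the example.
 *
 *	static int example_codec_resume(struct snd_soc_codec *codec)
 *	{
 *		codec->cache_only = 0;
 *		return snd_soc_cache_sync(codec);
 *	}
 */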
static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
					unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;
	unsigned int min, max, index;

	codec_drv = codec->driver;
	min = 0;
	max = codec_drv->reg_access_size - 1;
	do {
		index = (min + max) / 2;
		if (codec_drv->reg_access_default[index].reg == reg)
			return index;
		if (codec_drv->reg_access_default[index].reg < reg)
			min = index + 1;
		else
			max = index;
	} while (min <= max);
	return -1;
}

int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	int index;

	if (reg >= codec->driver->reg_cache_size)
		return 1;
	index = snd_soc_get_reg_access_index(codec, reg);
	if (index < 0)
		return 0;
	return codec->driver->reg_access_default[index].vol;
}
EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);

int snd_soc_default_readable_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	int index;

	if (reg >= codec->driver->reg_cache_size)
		return 1;
	index = snd_soc_get_reg_access_index(codec, reg);
	if (index < 0)
		return 0;
	return codec->driver->reg_access_default[index].read;
}
EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);

int snd_soc_default_writable_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	int index;

	if (reg >= codec->driver->reg_cache_size)
		return 1;
	index = snd_soc_get_reg_access_index(codec, reg);
	if (index < 0)
		return 0;
	return codec->driver->reg_access_default[index].write;
}
EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);
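
/*
 * Illustrative sketch (not part of the original file): the three default
 * access helpers above binary-search the driver's reg_access_default
 * table, which is expected to be sorted by register number. A minimal
 * table could look like this (the struct snd_soc_reg_access type name is
 * an assumption; the .reg/.read/.write/.vol fields come from the lookups
 * above, and the register values are made up):
 *
 *	static const struct snd_soc_reg_access example_access[] = {
 *		{ .reg = 0x00, .read = 1, .write = 1, .vol = 0 },
 *		{ .reg = 0x01, .read = 1, .write = 0, .vol = 1 },
 *	};
 *
 * with .reg_access_default = example_access and
 * .reg_access_size = ARRAY_SIZE(example_access) set in the codec driver.
 */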