soc-cache.c

/*
 * soc-cache.c -- ASoC register cache helpers
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <sound/soc.h>
#include <linux/lzo.h>
#include <linux/bitmap.h>
#include <linux/rbtree.h>

#include <trace/events/asoc.h>

#ifdef CONFIG_SPI_MASTER
static int do_spi_write(void *control, const char *data, int len)
{
	struct spi_device *spi = control;
	int ret;

	ret = spi_write(spi, data, len);
	if (ret < 0)
		return ret;

	return len;
}
#endif

static int do_hw_write(struct snd_soc_codec *codec, unsigned int reg,
		       unsigned int value, const void *data, int len)
{
	int ret;

	if (!snd_soc_codec_volatile_register(codec, reg) &&
	    reg < codec->driver->reg_cache_size &&
	    !codec->cache_bypass) {
		ret = snd_soc_cache_write(codec, reg, value);
		if (ret < 0)
			return -1;
	}

	if (codec->cache_only) {
		codec->cache_sync = 1;
		return 0;
	}

	ret = codec->hw_write(codec->control_data, data, len);
	if (ret == len)
		return 0;
	if (ret < 0)
		return ret;
	else
		return -EIO;
}

static unsigned int do_hw_read(struct snd_soc_codec *codec, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (reg >= codec->driver->reg_cache_size ||
	    snd_soc_codec_volatile_register(codec, reg) ||
	    codec->cache_bypass) {
		if (codec->cache_only)
			return -1;

		BUG_ON(!codec->hw_read);
		return codec->hw_read(codec, reg);
	}

	ret = snd_soc_cache_read(codec, reg, &val);
	if (ret < 0)
		return -1;
	return val;
}

static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	u16 data;

	/* 12 bits of data: mask the value so it cannot spill into the
	 * 4-bit register field */
	data = cpu_to_be16((reg << 12) | (value & 0xfff));

	return do_hw_write(codec, reg, value, &data, 2);
}

static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
				     unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
			     unsigned int value)
{
	u16 data;

	data = cpu_to_be16((reg << 9) | (value & 0x1ff));

	return do_hw_write(codec, reg, value, &data, 2);
}

static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
			     unsigned int value)
{
	u8 data[2];

	reg &= 0xff;
	data[0] = reg;
	data[1] = value & 0xff;

	return do_hw_write(codec, reg, value, data, 2);
}

static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
				     unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	u8 data[3];
	u16 val = cpu_to_be16(value);

	data[0] = reg;
	memcpy(&data[1], &val, sizeof(val));

	return do_hw_write(codec, reg, value, data, 3);
}

static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return do_hw_read(codec, reg);
}

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int do_i2c_read(struct snd_soc_codec *codec,
				void *reg, int reglen,
				void *data, int datalen)
{
	struct i2c_msg xfer[2];
	int ret;
	struct i2c_client *client = codec->control_data;

	/* Write register */
	xfer[0].addr = client->addr;
	xfer[0].flags = 0;
	xfer[0].len = reglen;
	xfer[0].buf = reg;

	/* Read data */
	xfer[1].addr = client->addr;
	xfer[1].flags = I2C_M_RD;
	xfer[1].len = datalen;
	xfer[1].buf = data;

	ret = i2c_transfer(client->adapter, xfer, 2);
	if (ret == 2)
		return 0;
	else if (ret < 0)
		return ret;
	else
		return -EIO;
}
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_8_8_read_i2c(struct snd_soc_codec *codec,
					 unsigned int r)
{
	u8 reg = r;
	u8 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 1, &data, 1);
	if (ret < 0)
		return 0;
	return data;
}
#else
#define snd_soc_8_8_read_i2c NULL
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_8_16_read_i2c(struct snd_soc_codec *codec,
					  unsigned int r)
{
	u8 reg = r;
	u16 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 1, &data, 2);
	if (ret < 0)
		return 0;
	return (data >> 8) | ((data & 0xff) << 8);
}
#else
#define snd_soc_8_16_read_i2c NULL
#endif

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
					  unsigned int r)
{
	u16 reg = r;
	u8 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 2, &data, 1);
	if (ret < 0)
		return 0;
	return data;
}
#else
#define snd_soc_16_8_read_i2c NULL
#endif

static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	u8 data[3];
	u16 rval = cpu_to_be16(reg);

	memcpy(data, &rval, sizeof(rval));
	data[2] = value;

	return do_hw_write(codec, reg, value, data, 3);
}

#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_16_16_read_i2c(struct snd_soc_codec *codec,
					   unsigned int r)
{
	u16 reg = cpu_to_be16(r);
	u16 data;
	int ret;

	ret = do_i2c_read(codec, &reg, 2, &data, 2);
	if (ret < 0)
		return 0;
	return be16_to_cpu(data);
}
#else
#define snd_soc_16_16_read_i2c NULL
#endif

static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
				       unsigned int reg)
{
	return do_hw_read(codec, reg);
}

static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
			       unsigned int value)
{
	u16 data[2];

	data[0] = cpu_to_be16(reg);
	data[1] = cpu_to_be16(value);

	return do_hw_write(codec, reg, value, data, sizeof(data));
}

/* Primitive bulk write support for soc-cache.  The data pointed to by
 * `data' needs to already be in the form the hardware expects
 * including any leading register specific data.  Any data written
 * through this function will not go through the cache as it only
 * handles writing to volatile or out of bounds registers.
 */
static int snd_soc_hw_bulk_write_raw(struct snd_soc_codec *codec, unsigned int reg,
				     const void *data, size_t len)
{
	int ret;

	/* To ensure that we don't get out of sync with the cache, check
	 * whether the base register is volatile or if we've directly asked
	 * to bypass the cache.  Out of bounds registers are considered
	 * volatile.
	 */
	if (!codec->cache_bypass
	    && !snd_soc_codec_volatile_register(codec, reg)
	    && reg < codec->driver->reg_cache_size)
		return -EINVAL;

	switch (codec->control_type) {
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
	case SND_SOC_I2C:
		ret = i2c_master_send(codec->control_data, data, len);
		break;
#endif
#if defined(CONFIG_SPI_MASTER)
	case SND_SOC_SPI:
		ret = spi_write(codec->control_data, data, len);
		break;
#endif
	default:
		BUG();
	}

	if (ret == len)
		return 0;

	if (ret < 0)
		return ret;
	else
		return -EIO;
}

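/*
 * Illustrative sketch of how a driver might feed the raw bulk write path
 * above.  The 16-bit address/16-bit data layout, the register number and
 * the values are assumptions for the example; only the bulk_write_raw hook
 * (assigned in snd_soc_codec_set_cache_io() below) comes from this file,
 * and the target register has to be volatile or out of cache range or the
 * call is rejected with -EINVAL.
 */
#if 0
static int example_bulk_write(struct snd_soc_codec *codec)
{
	/* leading register data followed by the payload, already in the
	 * form the hardware expects: big-endian register then values */
	__be16 buf[4];

	buf[0] = cpu_to_be16(0x40);	/* hypothetical volatile register */
	buf[1] = cpu_to_be16(0x0001);
	buf[2] = cpu_to_be16(0x0002);
	buf[3] = cpu_to_be16(0x0003);

	return codec->bulk_write_raw(codec, 0x40, buf, sizeof(buf));
}
#endif
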
static struct {
	int addr_bits;
	int data_bits;
	int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int);
	unsigned int (*read)(struct snd_soc_codec *, unsigned int);
	unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int);
} io_types[] = {
	{
		.addr_bits = 4, .data_bits = 12,
		.write = snd_soc_4_12_write, .read = snd_soc_4_12_read,
	},
	{
		.addr_bits = 7, .data_bits = 9,
		.write = snd_soc_7_9_write, .read = snd_soc_7_9_read,
	},
	{
		.addr_bits = 8, .data_bits = 8,
		.write = snd_soc_8_8_write, .read = snd_soc_8_8_read,
		.i2c_read = snd_soc_8_8_read_i2c,
	},
	{
		.addr_bits = 8, .data_bits = 16,
		.write = snd_soc_8_16_write, .read = snd_soc_8_16_read,
		.i2c_read = snd_soc_8_16_read_i2c,
	},
	{
		.addr_bits = 16, .data_bits = 8,
		.write = snd_soc_16_8_write, .read = snd_soc_16_8_read,
		.i2c_read = snd_soc_16_8_read_i2c,
	},
	{
		.addr_bits = 16, .data_bits = 16,
		.write = snd_soc_16_16_write, .read = snd_soc_16_16_read,
		.i2c_read = snd_soc_16_16_read_i2c,
	},
};

/**
 * snd_soc_codec_set_cache_io: Set up standard I/O functions.
 *
 * @codec: CODEC to configure.
 * @addr_bits: Number of bits of register address data.
 * @data_bits: Number of bits of data per register.
 * @control: Control bus used.
 *
 * Register formats are frequently shared between many I2C and SPI
 * devices.  In order to promote code reuse the ASoC core provides
 * some standard implementations of CODEC read and write operations
 * which can be set up using this function.
 *
 * The caller is responsible for allocating and initialising the
 * actual cache.
 *
 * Note that at present this code cannot be used by CODECs with
 * volatile registers.
 */
int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
			       int addr_bits, int data_bits,
			       enum snd_soc_control_type control)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(io_types); i++)
		if (io_types[i].addr_bits == addr_bits &&
		    io_types[i].data_bits == data_bits)
			break;
	if (i == ARRAY_SIZE(io_types)) {
		printk(KERN_ERR
		       "No I/O functions for %d bit address %d bit data\n",
		       addr_bits, data_bits);
		return -EINVAL;
	}

	codec->write = io_types[i].write;
	codec->read = io_types[i].read;
	codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;

	switch (control) {
	case SND_SOC_CUSTOM:
		break;

	case SND_SOC_I2C:
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
		codec->hw_write = (hw_write_t)i2c_master_send;
#endif
		if (io_types[i].i2c_read)
			codec->hw_read = io_types[i].i2c_read;

		codec->control_data = container_of(codec->dev,
						   struct i2c_client,
						   dev);
		break;

	case SND_SOC_SPI:
#ifdef CONFIG_SPI_MASTER
		codec->hw_write = do_spi_write;
#endif
		codec->control_data = container_of(codec->dev,
						   struct spi_device,
						   dev);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);

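/*
 * Illustrative sketch of the setup call described in the kernel-doc above:
 * a CODEC driver probe picks one of the register formats from io_types[]
 * and names the control bus.  The 7-bit address/9-bit data format, the I2C
 * bus and the probe function itself are assumptions for the example.
 */
#if 0
static int example_codec_probe(struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	return 0;
}
#endif
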
static bool snd_soc_set_cache_val(void *base, unsigned int idx,
				  unsigned int val, unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		u8 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	return false;
}

static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
					  unsigned int word_size)
{
	if (!base)
		return -1;

	switch (word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

struct snd_soc_rbtree_node {
	struct rb_node node; /* the actual rbtree node holding this block */
	unsigned int base_reg; /* base register handled by this block */
	unsigned int word_size; /* number of bytes needed to represent the register index */
	void *block; /* block of adjacent registers */
	unsigned int blklen; /* number of registers available in the block */
} __attribute__ ((packed));

struct snd_soc_rbtree_ctx {
	struct rb_root root;
	struct snd_soc_rbtree_node *cached_rbnode;
};

static inline void snd_soc_rbtree_get_base_top_reg(
	struct snd_soc_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + rbnode->blklen - 1;
}

static unsigned int snd_soc_rbtree_get_register(
	struct snd_soc_rbtree_node *rbnode, unsigned int idx)
{
	unsigned int val;

	switch (rbnode->word_size) {
	case 1: {
		u8 *p = rbnode->block;
		val = p[idx];
		return val;
	}
	case 2: {
		u16 *p = rbnode->block;
		val = p[idx];
		return val;
	}
	default:
		BUG();
		break;
	}
	return -1;
}

static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
					unsigned int idx, unsigned int val)
{
	switch (rbnode->word_size) {
	case 1: {
		u8 *p = rbnode->block;
		p[idx] = val;
		break;
	}
	case 2: {
		u16 *p = rbnode->block;
		p[idx] = val;
		break;
	}
	default:
		BUG();
		break;
	}
}

static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
	struct rb_root *root, unsigned int reg)
{
	struct rb_node *node;
	struct snd_soc_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	node = root->rb_node;
	while (node) {
		rbnode = container_of(node, struct snd_soc_rbtree_node, node);
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
		else if (reg > top_reg)
			node = node->rb_right;
		else if (reg < base_reg)
			node = node->rb_left;
	}

	return NULL;
}

static int snd_soc_rbtree_insert(struct rb_root *root,
				 struct snd_soc_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct snd_soc_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
						&top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct snd_soc_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i;

	rbtree_ctx = codec->reg_cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
		for (i = 0; i < rbnode->blklen; ++i) {
			regtmp = rbnode->base_reg + i;
			WARN_ON(codec->writable_register &&
				codec->writable_register(codec, regtmp));
			val = snd_soc_rbtree_get_register(rbnode, i);
			codec->cache_bypass = 1;
			ret = snd_soc_write(codec, regtmp, val);
			codec->cache_bypass = 0;
			if (ret)
				return ret;
			dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
				regtmp, val);
		}
	}

	return 0;
}

static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
					  unsigned int pos, unsigned int reg,
					  unsigned int value)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * rbnode->word_size,
		blk + pos * rbnode->word_size,
		(rbnode->blklen - pos) * rbnode->word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	snd_soc_rbtree_set_register(rbnode, pos, value);
	return 0;
}

static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
				      unsigned int reg, unsigned int value)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int base_reg, top_reg;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = codec->reg_cache;
	/* look up the required register in the cached rbnode */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			reg_tmp = reg - base_reg;
			val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
			if (val == value)
				return 0;
			snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
			return 0;
		}
	}
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
		if (val == value)
			return 0;
		snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
		rbtree_ctx->cached_rbnode = rbnode;
	} else {
		/* bail out early, no need to create the rbnode yet */
		if (!value)
			return 0;
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
			for (i = 0; i < rbnode_tmp->blklen; ++i) {
				reg_tmp = rbnode_tmp->base_reg + i;
				if (abs(reg_tmp - reg) != 1)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + 1 == reg)
					pos = i + 1;
				else
					pos = i;
				ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
								     reg, value);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->word_size = codec->driver->reg_word_size;
		rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		snd_soc_rbtree_set_register(rbnode, 0, value);
		snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
				     unsigned int reg, unsigned int *value)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int reg_tmp;

	rbtree_ctx = codec->reg_cache;
	/* look up the required register in the cached rbnode */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			reg_tmp = reg - base_reg;
			*value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
			return 0;
		}
	}
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		*value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
		rbtree_ctx->cached_rbnode = rbnode;
	} else {
		/* uninitialized registers default to 0 */
		*value = 0;
	}

	return 0;
}

static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
{
	struct rb_node *next;
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = codec->reg_cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(codec->reg_cache);
	codec->reg_cache = NULL;

	return 0;
}

static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	unsigned int word_size;
	unsigned int val;
	int i;
	int ret;

	codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!codec->reg_cache)
		return -ENOMEM;

	rbtree_ctx = codec->reg_cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	if (!codec->reg_def_copy)
		return 0;

	word_size = codec->driver->reg_word_size;
	for (i = 0; i < codec->driver->reg_cache_size; ++i) {
		val = snd_soc_get_cache_val(codec->reg_def_copy, i,
					    word_size);
		if (!val)
			continue;
		ret = snd_soc_rbtree_cache_write(codec, i, val);
		if (ret)
			goto err;
	}

	return 0;

err:
	snd_soc_cache_exit(codec);
	return ret;
}

#ifdef CONFIG_SND_SOC_CACHE_LZO
struct snd_soc_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};

#define LZO_BLOCK_NUM 8
static int snd_soc_lzo_block_count(void)
{
	return LZO_BLOCK_NUM;
}

static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}

static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}

static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}

static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
					    struct snd_soc_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = snd_soc_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
					      struct snd_soc_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = snd_soc_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
					   unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return (reg * codec_drv->reg_word_size) /
	       DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}

static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
					 unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
		      codec_drv->reg_word_size);
}

static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}

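/*
 * Worked example of the block arithmetic above, with hypothetical numbers:
 * for reg_size = 512 bytes, reg_word_size = 2 and LZO_BLOCK_NUM = 8, each
 * block covers DIV_ROUND_UP(512, 8) = 64 bytes, i.e. 32 registers.
 * Register 70 then lives in block (70 * 2) / 64 = 2, at position
 * 70 % (64 / 2) = 6 within the decompressed block.
 */
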
static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = codec->reg_cache;
	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
		WARN_ON(codec->writable_register &&
			codec->writable_register(codec, i));
		ret = snd_soc_cache_read(codec, i, &val);
		if (ret)
			return ret;
		codec->cache_bypass = 1;
		ret = snd_soc_write(codec, i, val);
		codec->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
			i, val);
	}

	return 0;
}

static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
				   unsigned int reg, unsigned int value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
				  codec->driver->reg_word_size)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}

static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
				  unsigned int reg, unsigned int *value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	*value = 0;
	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
					       codec->driver->reg_word_size);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return 0;
}

static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = codec->reg_cache;
	if (!lzo_blocks)
		return 0;

	blkcount = snd_soc_lzo_block_count();
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks.  Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		kfree(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; ++i) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	codec->reg_cache = NULL;

	return 0;
}

static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	const struct snd_soc_codec_driver *codec_drv;
	int ret, tofree, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;
	codec_drv = codec->driver;

	/*
	 * If we have not been given a default register cache
	 * then allocate a dummy zero-ed out region, compress it
	 * and remember to free it afterwards.
	 */
	tofree = 0;
	if (!codec->reg_def_copy)
		tofree = 1;

	if (!codec->reg_def_copy) {
		codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
		if (!codec->reg_def_copy)
			return -ENOMEM;
	}

	blkcount = snd_soc_lzo_block_count();
	codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
				   GFP_KERNEL);
	if (!codec->reg_cache) {
		ret = -ENOMEM;
		goto err_tofree;
	}
	lzo_blocks = codec->reg_cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware.  Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = codec_drv->reg_cache_size;
	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
			   GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}
	bitmap_zero(sync_bmp, bmp_size);

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; ++i) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			kfree(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = snd_soc_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = snd_soc_lzo_get_blksize(codec);
	p = codec->reg_def_copy;
	end = codec->reg_def_copy + codec->reg_size;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; ++i, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = snd_soc_lzo_compress_cache_block(codec,
						       lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	if (tofree) {
		kfree(codec->reg_def_copy);
		codec->reg_def_copy = NULL;
	}
	return 0;

err:
	snd_soc_cache_exit(codec);
err_tofree:
	if (tofree) {
		kfree(codec->reg_def_copy);
		codec->reg_def_copy = NULL;
	}
	return ret;
}
#endif

static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
{
	int i;
	int ret;
	const struct snd_soc_codec_driver *codec_drv;
	unsigned int val;

	codec_drv = codec->driver;
	for (i = 0; i < codec_drv->reg_cache_size; ++i) {
		WARN_ON(codec->writable_register &&
			codec->writable_register(codec, i));
		ret = snd_soc_cache_read(codec, i, &val);
		if (ret)
			return ret;
		if (codec->reg_def_copy)
			if (snd_soc_get_cache_val(codec->reg_def_copy,
						  i, codec_drv->reg_word_size) == val)
				continue;
		ret = snd_soc_write(codec, i, val);
		if (ret)
			return ret;
		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
			i, val);
	}
	return 0;
}

static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
				    unsigned int reg, unsigned int value)
{
	snd_soc_set_cache_val(codec->reg_cache, reg, value,
			      codec->driver->reg_word_size);
	return 0;
}

static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
				   unsigned int reg, unsigned int *value)
{
	*value = snd_soc_get_cache_val(codec->reg_cache, reg,
				       codec->driver->reg_word_size);
	return 0;
}

static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
{
	if (!codec->reg_cache)
		return 0;
	kfree(codec->reg_cache);
	codec->reg_cache = NULL;
	return 0;
}

static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;

	if (codec->reg_def_copy)
		codec->reg_cache = kmemdup(codec->reg_def_copy,
					   codec->reg_size, GFP_KERNEL);
	else
		codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
	if (!codec->reg_cache)
		return -ENOMEM;

	return 0;
}

/* an array of all supported compression types */
static const struct snd_soc_cache_ops cache_types[] = {
	/* Flat *must* be the first entry for fallback */
	{
		.id = SND_SOC_FLAT_COMPRESSION,
		.name = "flat",
		.init = snd_soc_flat_cache_init,
		.exit = snd_soc_flat_cache_exit,
		.read = snd_soc_flat_cache_read,
		.write = snd_soc_flat_cache_write,
		.sync = snd_soc_flat_cache_sync
	},
#ifdef CONFIG_SND_SOC_CACHE_LZO
	{
		.id = SND_SOC_LZO_COMPRESSION,
		.name = "LZO",
		.init = snd_soc_lzo_cache_init,
		.exit = snd_soc_lzo_cache_exit,
		.read = snd_soc_lzo_cache_read,
		.write = snd_soc_lzo_cache_write,
		.sync = snd_soc_lzo_cache_sync
	},
#endif
	{
		.id = SND_SOC_RBTREE_COMPRESSION,
		.name = "rbtree",
		.init = snd_soc_rbtree_cache_init,
		.exit = snd_soc_rbtree_cache_exit,
		.read = snd_soc_rbtree_cache_read,
		.write = snd_soc_rbtree_cache_write,
		.sync = snd_soc_rbtree_cache_sync
	}
};

int snd_soc_cache_init(struct snd_soc_codec *codec)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
		if (cache_types[i].id == codec->compress_type)
			break;

	/* Fall back to flat compression */
	if (i == ARRAY_SIZE(cache_types)) {
		dev_warn(codec->dev, "Could not match compress type: %d\n",
			 codec->compress_type);
		i = 0;
	}

	mutex_init(&codec->cache_rw_mutex);
	codec->cache_ops = &cache_types[i];

	if (codec->cache_ops->init) {
		if (codec->cache_ops->name)
			dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
				codec->cache_ops->name, codec->name);
		return codec->cache_ops->init(codec);
	}
	return -ENOSYS;
}

/*
 * NOTE: keep in mind that this function might be called
 * multiple times.
 */
int snd_soc_cache_exit(struct snd_soc_codec *codec)
{
	if (codec->cache_ops && codec->cache_ops->exit) {
		if (codec->cache_ops->name)
			dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
				codec->cache_ops->name, codec->name);
		return codec->cache_ops->exit(codec);
	}
	return -ENOSYS;
}

/**
 * snd_soc_cache_read: Fetch the value of a given register from the cache.
 *
 * @codec: CODEC to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 */
int snd_soc_cache_read(struct snd_soc_codec *codec,
		       unsigned int reg, unsigned int *value)
{
	int ret;

	mutex_lock(&codec->cache_rw_mutex);

	if (value && codec->cache_ops && codec->cache_ops->read) {
		ret = codec->cache_ops->read(codec, reg, value);
		mutex_unlock(&codec->cache_rw_mutex);
		return ret;
	}

	mutex_unlock(&codec->cache_rw_mutex);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_read);

/**
 * snd_soc_cache_write: Set the value of a given register in the cache.
 *
 * @codec: CODEC to configure.
 * @reg: The register index.
 * @value: The new register value.
 */
int snd_soc_cache_write(struct snd_soc_codec *codec,
			unsigned int reg, unsigned int value)
{
	int ret;

	mutex_lock(&codec->cache_rw_mutex);

	if (codec->cache_ops && codec->cache_ops->write) {
		ret = codec->cache_ops->write(codec, reg, value);
		mutex_unlock(&codec->cache_rw_mutex);
		return ret;
	}

	mutex_unlock(&codec->cache_rw_mutex);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_write);

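/*
 * Illustrative sketch of the two accessors above: read a cached value,
 * set a bit and write the result back to the cache.  The register index
 * and bit mask are hypothetical; a real driver would normally go through
 * snd_soc_update_bits() or snd_soc_write() rather than touching only the
 * cache.
 */
#if 0
static int example_cache_update(struct snd_soc_codec *codec)
{
	unsigned int val;
	int ret;

	ret = snd_soc_cache_read(codec, 0x1a, &val);
	if (ret < 0)
		return ret;

	val |= 0x0040;

	return snd_soc_cache_write(codec, 0x1a, val);
}
#endif
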
/**
 * snd_soc_cache_sync: Sync the register cache with the hardware.
 *
 * @codec: CODEC to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 */
int snd_soc_cache_sync(struct snd_soc_codec *codec)
{
	int ret;
	const char *name;

	if (!codec->cache_sync) {
		return 0;
	}

	if (!codec->cache_ops || !codec->cache_ops->sync)
		return -ENOSYS;

	if (codec->cache_ops->name)
		name = codec->cache_ops->name;
	else
		name = "unknown";

	if (codec->cache_ops->name)
		dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
			codec->cache_ops->name, codec->name);
	trace_snd_soc_cache_sync(codec, name, "start");
	ret = codec->cache_ops->sync(codec);
	if (!ret)
		codec->cache_sync = 0;
	trace_snd_soc_cache_sync(codec, name, "end");
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_sync);

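/*
 * Illustrative sketch of a sync user: a resume path clears cache_only so
 * writes reach the hardware again and then replays whatever was deferred.
 * The resume hook itself is hypothetical; the cache_only/cache_sync
 * interaction matches do_hw_write() earlier in this file.
 */
#if 0
static int example_codec_resume(struct snd_soc_codec *codec)
{
	/* while cache_only was set, do_hw_write() only updated the cache
	 * and flagged cache_sync */
	codec->cache_only = 0;

	return snd_soc_cache_sync(codec);
}
#endif
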
static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
					unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;
	int min, max, index;

	codec_drv = codec->driver;
	min = 0;
	max = codec_drv->reg_access_size - 1;
	/* binary search over the sorted reg_access_default table */
	do {
		index = (min + max) / 2;
		if (codec_drv->reg_access_default[index].reg == reg)
			return index;
		if (codec_drv->reg_access_default[index].reg < reg)
			min = index + 1;
		else
			max = index - 1;
	} while (min <= max);

	return -1;
}

int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	int index;

	if (reg >= codec->driver->reg_cache_size)
		return 1;
	index = snd_soc_get_reg_access_index(codec, reg);
	if (index < 0)
		return 0;
	return codec->driver->reg_access_default[index].vol;
}
EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);

int snd_soc_default_readable_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	int index;

	if (reg >= codec->driver->reg_cache_size)
		return 1;
	index = snd_soc_get_reg_access_index(codec, reg);
	if (index < 0)
		return 0;
	return codec->driver->reg_access_default[index].read;
}
EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);

int snd_soc_default_writable_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	int index;

	if (reg >= codec->driver->reg_cache_size)
		return 1;
	index = snd_soc_get_reg_access_index(codec, reg);
	if (index < 0)
		return 0;
	return codec->driver->reg_access_default[index].write;
}
EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);