soc-cache.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527
  1. /*
  2. * soc-cache.c -- ASoC register cache helpers
  3. *
  4. * Copyright 2009 Wolfson Microelectronics PLC.
  5. *
  6. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2 of the License, or (at your
  11. * option) any later version.
  12. */
  13. #include <linux/i2c.h>
  14. #include <linux/spi/spi.h>
  15. #include <sound/soc.h>
  16. #include <linux/lzo.h>
  17. #include <linux/bitmap.h>
  18. #include <linux/rbtree.h>
  19. #include <trace/events/asoc.h>
  20. #ifdef CONFIG_SPI_MASTER
  21. static int do_spi_write(void *control, const char *data, int len)
  22. {
  23. struct spi_device *spi = control;
  24. int ret;
  25. ret = spi_write(spi, data, len);
  26. if (ret < 0)
  27. return ret;
  28. return len;
  29. }
  30. #endif
  31. static int do_hw_write(struct snd_soc_codec *codec, unsigned int reg,
  32. unsigned int value, const void *data, int len)
  33. {
  34. int ret;
  35. if (!snd_soc_codec_volatile_register(codec, reg) &&
  36. reg < codec->driver->reg_cache_size &&
  37. !codec->cache_bypass) {
  38. ret = snd_soc_cache_write(codec, reg, value);
  39. if (ret < 0)
  40. return -1;
  41. }
  42. if (codec->cache_only) {
  43. codec->cache_sync = 1;
  44. return 0;
  45. }
  46. ret = codec->hw_write(codec->control_data, data, len);
  47. if (ret == len)
  48. return 0;
  49. if (ret < 0)
  50. return ret;
  51. else
  52. return -EIO;
  53. }
  54. static unsigned int do_hw_read(struct snd_soc_codec *codec, unsigned int reg)
  55. {
  56. int ret;
  57. unsigned int val;
  58. if (reg >= codec->driver->reg_cache_size ||
  59. snd_soc_codec_volatile_register(codec, reg) ||
  60. codec->cache_bypass) {
  61. if (codec->cache_only)
  62. return -1;
  63. BUG_ON(!codec->hw_read);
  64. return codec->hw_read(codec, reg);
  65. }
  66. ret = snd_soc_cache_read(codec, reg, &val);
  67. if (ret < 0)
  68. return -1;
  69. return val;
  70. }
  71. static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
  72. unsigned int reg)
  73. {
  74. return do_hw_read(codec, reg);
  75. }
  76. static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
  77. unsigned int value)
  78. {
  79. u16 data;
  80. data = cpu_to_be16((reg << 12) | (value & 0xffffff));
  81. return do_hw_write(codec, reg, value, &data, 2);
  82. }
  83. static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
  84. unsigned int reg)
  85. {
  86. return do_hw_read(codec, reg);
  87. }
  88. static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
  89. unsigned int value)
  90. {
  91. u16 data;
  92. data = cpu_to_be16((reg << 9) | (value & 0x1ff));
  93. return do_hw_write(codec, reg, value, &data, 2);
  94. }
  95. static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
  96. unsigned int value)
  97. {
  98. u8 data[2];
  99. reg &= 0xff;
  100. data[0] = reg;
  101. data[1] = value & 0xff;
  102. return do_hw_write(codec, reg, value, data, 2);
  103. }
  104. static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
  105. unsigned int reg)
  106. {
  107. return do_hw_read(codec, reg);
  108. }
  109. static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
  110. unsigned int value)
  111. {
  112. u8 data[3];
  113. u16 val = cpu_to_be16(value);
  114. data[0] = reg;
  115. memcpy(&data[1], &val, sizeof(val));
  116. return do_hw_write(codec, reg, value, data, 3);
  117. }
  118. static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
  119. unsigned int reg)
  120. {
  121. return do_hw_read(codec, reg);
  122. }
  123. #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
  124. static unsigned int do_i2c_read(struct snd_soc_codec *codec,
  125. void *reg, int reglen,
  126. void *data, int datalen)
  127. {
  128. struct i2c_msg xfer[2];
  129. int ret;
  130. struct i2c_client *client = codec->control_data;
  131. /* Write register */
  132. xfer[0].addr = client->addr;
  133. xfer[0].flags = 0;
  134. xfer[0].len = reglen;
  135. xfer[0].buf = reg;
  136. /* Read data */
  137. xfer[1].addr = client->addr;
  138. xfer[1].flags = I2C_M_RD;
  139. xfer[1].len = datalen;
  140. xfer[1].buf = data;
  141. ret = i2c_transfer(client->adapter, xfer, 2);
  142. if (ret == 2)
  143. return 0;
  144. else if (ret < 0)
  145. return ret;
  146. else
  147. return -EIO;
  148. }
  149. #endif
  150. #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
  151. static unsigned int snd_soc_8_8_read_i2c(struct snd_soc_codec *codec,
  152. unsigned int r)
  153. {
  154. u8 reg = r;
  155. u8 data;
  156. int ret;
  157. ret = do_i2c_read(codec, &reg, 1, &data, 1);
  158. if (ret < 0)
  159. return 0;
  160. return data;
  161. }
  162. #else
  163. #define snd_soc_8_8_read_i2c NULL
  164. #endif
  165. #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
  166. static unsigned int snd_soc_8_16_read_i2c(struct snd_soc_codec *codec,
  167. unsigned int r)
  168. {
  169. u8 reg = r;
  170. u16 data;
  171. int ret;
  172. ret = do_i2c_read(codec, &reg, 1, &data, 2);
  173. if (ret < 0)
  174. return 0;
  175. return (data >> 8) | ((data & 0xff) << 8);
  176. }
  177. #else
  178. #define snd_soc_8_16_read_i2c NULL
  179. #endif
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
/* I2C hardware read for 16-bit address / 8-bit data devices.
 * Returns the register value, or 0 if the transfer failed.
 *
 * NOTE(review): the 16-bit register address is sent in host byte order,
 * whereas snd_soc_16_16_read_i2c() converts with cpu_to_be16() first.
 * Presumably the devices using this format expect native order, but the
 * inconsistency looks suspicious -- confirm against the users of 16_8
 * I/O before changing.
 */
static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
                                          unsigned int r)
{
        u16 reg = r;
        u8 data;
        int ret;

        ret = do_i2c_read(codec, &reg, 2, &data, 1);
        if (ret < 0)
                return 0;
        return data;
}
#else
#define snd_soc_16_8_read_i2c NULL
#endif
  195. static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
  196. unsigned int reg)
  197. {
  198. return do_hw_read(codec, reg);
  199. }
  200. static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
  201. unsigned int value)
  202. {
  203. u8 data[3];
  204. u16 rval = cpu_to_be16(reg);
  205. memcpy(data, &rval, sizeof(rval));
  206. data[2] = value;
  207. return do_hw_write(codec, reg, value, data, 3);
  208. }
  209. #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
  210. static unsigned int snd_soc_16_16_read_i2c(struct snd_soc_codec *codec,
  211. unsigned int r)
  212. {
  213. u16 reg = cpu_to_be16(r);
  214. u16 data;
  215. int ret;
  216. ret = do_i2c_read(codec, &reg, 2, &data, 2);
  217. if (ret < 0)
  218. return 0;
  219. return be16_to_cpu(data);
  220. }
  221. #else
  222. #define snd_soc_16_16_read_i2c NULL
  223. #endif
  224. static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
  225. unsigned int reg)
  226. {
  227. return do_hw_read(codec, reg);
  228. }
  229. static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
  230. unsigned int value)
  231. {
  232. u16 data[2];
  233. data[0] = cpu_to_be16(reg);
  234. data[1] = cpu_to_be16(value);
  235. return do_hw_write(codec, reg, value, data, sizeof(data));
  236. }
  237. /* Primitive bulk write support for soc-cache. The data pointed to by
  238. * `data' needs to already be in the form the hardware expects
  239. * including any leading register specific data. Any data written
  240. * through this function will not go through the cache as it only
  241. * handles writing to volatile or out of bounds registers.
  242. */
  243. static int snd_soc_hw_bulk_write_raw(struct snd_soc_codec *codec, unsigned int reg,
  244. const void *data, size_t len)
  245. {
  246. int ret;
  247. /* To ensure that we don't get out of sync with the cache, check
  248. * whether the base register is volatile or if we've directly asked
  249. * to bypass the cache. Out of bounds registers are considered
  250. * volatile.
  251. */
  252. if (!codec->cache_bypass
  253. && !snd_soc_codec_volatile_register(codec, reg)
  254. && reg < codec->driver->reg_cache_size)
  255. return -EINVAL;
  256. switch (codec->control_type) {
  257. #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
  258. case SND_SOC_I2C:
  259. ret = i2c_master_send(codec->control_data, data, len);
  260. break;
  261. #endif
  262. #if defined(CONFIG_SPI_MASTER)
  263. case SND_SOC_SPI:
  264. ret = spi_write(codec->control_data, data, len);
  265. break;
  266. #endif
  267. default:
  268. BUG();
  269. }
  270. if (ret == len)
  271. return 0;
  272. if (ret < 0)
  273. return ret;
  274. else
  275. return -EIO;
  276. }
  277. static struct {
  278. int addr_bits;
  279. int data_bits;
  280. int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int);
  281. unsigned int (*read)(struct snd_soc_codec *, unsigned int);
  282. unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int);
  283. } io_types[] = {
  284. {
  285. .addr_bits = 4, .data_bits = 12,
  286. .write = snd_soc_4_12_write, .read = snd_soc_4_12_read,
  287. },
  288. {
  289. .addr_bits = 7, .data_bits = 9,
  290. .write = snd_soc_7_9_write, .read = snd_soc_7_9_read,
  291. },
  292. {
  293. .addr_bits = 8, .data_bits = 8,
  294. .write = snd_soc_8_8_write, .read = snd_soc_8_8_read,
  295. .i2c_read = snd_soc_8_8_read_i2c,
  296. },
  297. {
  298. .addr_bits = 8, .data_bits = 16,
  299. .write = snd_soc_8_16_write, .read = snd_soc_8_16_read,
  300. .i2c_read = snd_soc_8_16_read_i2c,
  301. },
  302. {
  303. .addr_bits = 16, .data_bits = 8,
  304. .write = snd_soc_16_8_write, .read = snd_soc_16_8_read,
  305. .i2c_read = snd_soc_16_8_read_i2c,
  306. },
  307. {
  308. .addr_bits = 16, .data_bits = 16,
  309. .write = snd_soc_16_16_write, .read = snd_soc_16_16_read,
  310. .i2c_read = snd_soc_16_16_read_i2c,
  311. },
  312. };
  313. /**
  314. * snd_soc_codec_set_cache_io: Set up standard I/O functions.
  315. *
  316. * @codec: CODEC to configure.
  317. * @addr_bits: Number of bits of register address data.
  318. * @data_bits: Number of bits of data per register.
  319. * @control: Control bus used.
  320. *
  321. * Register formats are frequently shared between many I2C and SPI
  322. * devices. In order to promote code reuse the ASoC core provides
  323. * some standard implementations of CODEC read and write operations
  324. * which can be set up using this function.
  325. *
  326. * The caller is responsible for allocating and initialising the
  327. * actual cache.
  328. *
  329. * Note that at present this code cannot be used by CODECs with
  330. * volatile registers.
  331. */
  332. int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
  333. int addr_bits, int data_bits,
  334. enum snd_soc_control_type control)
  335. {
  336. int i;
  337. for (i = 0; i < ARRAY_SIZE(io_types); i++)
  338. if (io_types[i].addr_bits == addr_bits &&
  339. io_types[i].data_bits == data_bits)
  340. break;
  341. if (i == ARRAY_SIZE(io_types)) {
  342. printk(KERN_ERR
  343. "No I/O functions for %d bit address %d bit data\n",
  344. addr_bits, data_bits);
  345. return -EINVAL;
  346. }
  347. codec->write = io_types[i].write;
  348. codec->read = io_types[i].read;
  349. codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
  350. switch (control) {
  351. case SND_SOC_CUSTOM:
  352. break;
  353. case SND_SOC_I2C:
  354. #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
  355. codec->hw_write = (hw_write_t)i2c_master_send;
  356. #endif
  357. if (io_types[i].i2c_read)
  358. codec->hw_read = io_types[i].i2c_read;
  359. codec->control_data = container_of(codec->dev,
  360. struct i2c_client,
  361. dev);
  362. break;
  363. case SND_SOC_SPI:
  364. #ifdef CONFIG_SPI_MASTER
  365. codec->hw_write = do_spi_write;
  366. #endif
  367. codec->control_data = container_of(codec->dev,
  368. struct spi_device,
  369. dev);
  370. break;
  371. }
  372. return 0;
  373. }
  374. EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
  375. static bool snd_soc_set_cache_val(void *base, unsigned int idx,
  376. unsigned int val, unsigned int word_size)
  377. {
  378. switch (word_size) {
  379. case 1: {
  380. u8 *cache = base;
  381. if (cache[idx] == val)
  382. return true;
  383. cache[idx] = val;
  384. break;
  385. }
  386. case 2: {
  387. u16 *cache = base;
  388. if (cache[idx] == val)
  389. return true;
  390. cache[idx] = val;
  391. break;
  392. }
  393. default:
  394. BUG();
  395. }
  396. return false;
  397. }
  398. static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
  399. unsigned int word_size)
  400. {
  401. if (!base)
  402. return -1;
  403. switch (word_size) {
  404. case 1: {
  405. const u8 *cache = base;
  406. return cache[idx];
  407. }
  408. case 2: {
  409. const u16 *cache = base;
  410. return cache[idx];
  411. }
  412. default:
  413. BUG();
  414. }
  415. /* unreachable */
  416. return -1;
  417. }
/* One rbtree node covering a contiguous run of registers.
 * NOTE(review): the packed attribute saves padding but means embedded
 * pointers may be unaligned on some architectures -- presumably a
 * deliberate size/speed trade-off; confirm before relying on it.
 */
struct snd_soc_rbtree_node {
        struct rb_node node; /* the actual rbtree node holding this block */
        unsigned int base_reg; /* base register handled by this block */
        unsigned int word_size; /* number of bytes needed to represent the register index */
        void *block; /* block of adjacent registers */
        unsigned int blklen; /* number of registers available in the block */
} __attribute__ ((packed));
/* Per-codec rbtree cache state: the tree root plus a one-entry lookup
 * cache holding the most recently accessed node. */
struct snd_soc_rbtree_ctx {
        struct rb_root root;
        struct snd_soc_rbtree_node *cached_rbnode;
};
  429. static inline void snd_soc_rbtree_get_base_top_reg(
  430. struct snd_soc_rbtree_node *rbnode,
  431. unsigned int *base, unsigned int *top)
  432. {
  433. *base = rbnode->base_reg;
  434. *top = rbnode->base_reg + rbnode->blklen - 1;
  435. }
  436. static unsigned int snd_soc_rbtree_get_register(
  437. struct snd_soc_rbtree_node *rbnode, unsigned int idx)
  438. {
  439. unsigned int val;
  440. switch (rbnode->word_size) {
  441. case 1: {
  442. u8 *p = rbnode->block;
  443. val = p[idx];
  444. return val;
  445. }
  446. case 2: {
  447. u16 *p = rbnode->block;
  448. val = p[idx];
  449. return val;
  450. }
  451. default:
  452. BUG();
  453. break;
  454. }
  455. return -1;
  456. }
  457. static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
  458. unsigned int idx, unsigned int val)
  459. {
  460. switch (rbnode->word_size) {
  461. case 1: {
  462. u8 *p = rbnode->block;
  463. p[idx] = val;
  464. break;
  465. }
  466. case 2: {
  467. u16 *p = rbnode->block;
  468. p[idx] = val;
  469. break;
  470. }
  471. default:
  472. BUG();
  473. break;
  474. }
  475. }
  476. static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
  477. struct rb_root *root, unsigned int reg)
  478. {
  479. struct rb_node *node;
  480. struct snd_soc_rbtree_node *rbnode;
  481. unsigned int base_reg, top_reg;
  482. node = root->rb_node;
  483. while (node) {
  484. rbnode = container_of(node, struct snd_soc_rbtree_node, node);
  485. snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
  486. if (reg >= base_reg && reg <= top_reg)
  487. return rbnode;
  488. else if (reg > top_reg)
  489. node = node->rb_right;
  490. else if (reg < base_reg)
  491. node = node->rb_left;
  492. }
  493. return NULL;
  494. }
  495. static int snd_soc_rbtree_insert(struct rb_root *root,
  496. struct snd_soc_rbtree_node *rbnode)
  497. {
  498. struct rb_node **new, *parent;
  499. struct snd_soc_rbtree_node *rbnode_tmp;
  500. unsigned int base_reg_tmp, top_reg_tmp;
  501. unsigned int base_reg;
  502. parent = NULL;
  503. new = &root->rb_node;
  504. while (*new) {
  505. rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
  506. node);
  507. /* base and top registers of the current rbnode */
  508. snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
  509. &top_reg_tmp);
  510. /* base register of the rbnode to be added */
  511. base_reg = rbnode->base_reg;
  512. parent = *new;
  513. /* if this register has already been inserted, just return */
  514. if (base_reg >= base_reg_tmp &&
  515. base_reg <= top_reg_tmp)
  516. return 0;
  517. else if (base_reg > top_reg_tmp)
  518. new = &((*new)->rb_right);
  519. else if (base_reg < base_reg_tmp)
  520. new = &((*new)->rb_left);
  521. }
  522. /* insert the node into the rbtree */
  523. rb_link_node(&rbnode->node, parent, new);
  524. rb_insert_color(&rbnode->node, root);
  525. return 1;
  526. }
  527. static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
  528. {
  529. struct snd_soc_rbtree_ctx *rbtree_ctx;
  530. struct rb_node *node;
  531. struct snd_soc_rbtree_node *rbnode;
  532. unsigned int regtmp;
  533. unsigned int val, def;
  534. int ret;
  535. int i;
  536. rbtree_ctx = codec->reg_cache;
  537. for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
  538. rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
  539. for (i = 0; i < rbnode->blklen; ++i) {
  540. regtmp = rbnode->base_reg + i;
  541. WARN_ON(codec->writable_register &&
  542. codec->writable_register(codec, regtmp));
  543. val = snd_soc_rbtree_get_register(rbnode, i);
  544. def = snd_soc_get_cache_val(codec->reg_def_copy, i,
  545. rbnode->word_size);
  546. if (val == def)
  547. continue;
  548. codec->cache_bypass = 1;
  549. ret = snd_soc_write(codec, regtmp, val);
  550. codec->cache_bypass = 0;
  551. if (ret)
  552. return ret;
  553. dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
  554. regtmp, val);
  555. }
  556. }
  557. return 0;
  558. }
  559. static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
  560. unsigned int pos, unsigned int reg,
  561. unsigned int value)
  562. {
  563. u8 *blk;
  564. blk = krealloc(rbnode->block,
  565. (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
  566. if (!blk)
  567. return -ENOMEM;
  568. /* insert the register value in the correct place in the rbnode block */
  569. memmove(blk + (pos + 1) * rbnode->word_size,
  570. blk + pos * rbnode->word_size,
  571. (rbnode->blklen - pos) * rbnode->word_size);
  572. /* update the rbnode block, its size and the base register */
  573. rbnode->block = blk;
  574. rbnode->blklen++;
  575. if (!pos)
  576. rbnode->base_reg = reg;
  577. snd_soc_rbtree_set_register(rbnode, pos, value);
  578. return 0;
  579. }
/* Store @value for @reg in the rbtree cache.
 *
 * Fast path: the most recently used node, then a tree lookup.  If no
 * node covers @reg the value is placed next to an adjacent cached
 * register when one exists, otherwise a fresh single-register node is
 * allocated.  Zero writes to uncached registers are dropped, since
 * uninitialized registers read back as 0 anyway.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
                                      unsigned int reg, unsigned int value)
{
        struct snd_soc_rbtree_ctx *rbtree_ctx;
        struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int val;
        unsigned int reg_tmp;
        unsigned int base_reg, top_reg;
        unsigned int pos;
        int i;
        int ret;

        rbtree_ctx = codec->reg_cache;
        /* look up the required register in the cached rbnode */
        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        reg_tmp = reg - base_reg;
                        val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
                        /* skip the store when the value is unchanged */
                        if (val == value)
                                return 0;
                        snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
                        return 0;
                }
        }
        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
        if (rbnode) {
                reg_tmp = reg - rbnode->base_reg;
                val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
                if (val == value)
                        return 0;
                snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
                /* remember this node for the next access */
                rbtree_ctx->cached_rbnode = rbnode;
        } else {
                /* bail out early, no need to create the rbnode yet */
                if (!value)
                        return 0;
                /* look for an adjacent register to the one we are about to add */
                for (node = rb_first(&rbtree_ctx->root); node;
                     node = rb_next(node)) {
                        rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
                        for (i = 0; i < rbnode_tmp->blklen; ++i) {
                                reg_tmp = rbnode_tmp->base_reg + i;
                                if (abs(reg_tmp - reg) != 1)
                                        continue;
                                /* decide where in the block to place our register */
                                if (reg_tmp + 1 == reg)
                                        pos = i + 1;
                                else
                                        pos = i;
                                ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
                                                                     reg, value);
                                if (ret)
                                        return ret;
                                rbtree_ctx->cached_rbnode = rbnode_tmp;
                                return 0;
                        }
                }
                /* we did not manage to find a place to insert it in an existing
                 * block so create a new rbnode with a single register in its block.
                 * This block will get populated further if any other adjacent
                 * registers get modified in the future.
                 */
                rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
                if (!rbnode)
                        return -ENOMEM;
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
                rbnode->word_size = codec->driver->reg_word_size;
                rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
                                        GFP_KERNEL);
                if (!rbnode->block) {
                        kfree(rbnode);
                        return -ENOMEM;
                }
                snd_soc_rbtree_set_register(rbnode, 0, value);
                snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }
        return 0;
}
  665. static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
  666. unsigned int reg, unsigned int *value)
  667. {
  668. struct snd_soc_rbtree_ctx *rbtree_ctx;
  669. struct snd_soc_rbtree_node *rbnode;
  670. unsigned int base_reg, top_reg;
  671. unsigned int reg_tmp;
  672. rbtree_ctx = codec->reg_cache;
  673. /* look up the required register in the cached rbnode */
  674. rbnode = rbtree_ctx->cached_rbnode;
  675. if (rbnode) {
  676. snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
  677. if (reg >= base_reg && reg <= top_reg) {
  678. reg_tmp = reg - base_reg;
  679. *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
  680. return 0;
  681. }
  682. }
  683. /* if we can't locate it in the cached rbnode we'll have
  684. * to traverse the rbtree looking for it.
  685. */
  686. rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
  687. if (rbnode) {
  688. reg_tmp = reg - rbnode->base_reg;
  689. *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
  690. rbtree_ctx->cached_rbnode = rbnode;
  691. } else {
  692. /* uninitialized registers default to 0 */
  693. *value = 0;
  694. }
  695. return 0;
  696. }
  697. static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
  698. {
  699. struct rb_node *next;
  700. struct snd_soc_rbtree_ctx *rbtree_ctx;
  701. struct snd_soc_rbtree_node *rbtree_node;
  702. /* if we've already been called then just return */
  703. rbtree_ctx = codec->reg_cache;
  704. if (!rbtree_ctx)
  705. return 0;
  706. /* free up the rbtree */
  707. next = rb_first(&rbtree_ctx->root);
  708. while (next) {
  709. rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
  710. next = rb_next(&rbtree_node->node);
  711. rb_erase(&rbtree_node->node, &rbtree_ctx->root);
  712. kfree(rbtree_node->block);
  713. kfree(rbtree_node);
  714. }
  715. /* release the resources */
  716. kfree(codec->reg_cache);
  717. codec->reg_cache = NULL;
  718. return 0;
  719. }
  720. static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
  721. {
  722. struct snd_soc_rbtree_ctx *rbtree_ctx;
  723. unsigned int word_size;
  724. unsigned int val;
  725. int i;
  726. int ret;
  727. codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
  728. if (!codec->reg_cache)
  729. return -ENOMEM;
  730. rbtree_ctx = codec->reg_cache;
  731. rbtree_ctx->root = RB_ROOT;
  732. rbtree_ctx->cached_rbnode = NULL;
  733. if (!codec->reg_def_copy)
  734. return 0;
  735. word_size = codec->driver->reg_word_size;
  736. for (i = 0; i < codec->driver->reg_cache_size; ++i) {
  737. val = snd_soc_get_cache_val(codec->reg_def_copy, i,
  738. word_size);
  739. if (!val)
  740. continue;
  741. ret = snd_soc_rbtree_cache_write(codec, i, val);
  742. if (ret)
  743. goto err;
  744. }
  745. return 0;
  746. err:
  747. snd_soc_cache_exit(codec);
  748. return ret;
  749. }
#ifdef CONFIG_SND_SOC_CACHE_LZO
/* State for one LZO-compressed cache block plus the shared sync bitmap
 * (the bitmap fields are only populated on block 0). */
struct snd_soc_lzo_ctx {
        void *wmem;                  /* LZO compression work memory */
        void *dst;                   /* output buffer for (de)compression */
        const void *src;             /* input buffer for (de)compression */
        size_t src_len;              /* length of src in bytes */
        size_t dst_len;              /* in: capacity of dst; out: bytes produced */
        size_t decompressed_size;    /* uncompressed size of this block */
        unsigned long *sync_bmp;     /* bitmap of registers needing sync */
        int sync_bmp_nbits;          /* number of bits in sync_bmp */
};
  761. #define LZO_BLOCK_NUM 8
  762. static int snd_soc_lzo_block_count(void)
  763. {
  764. return LZO_BLOCK_NUM;
  765. }
  766. static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
  767. {
  768. lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
  769. if (!lzo_ctx->wmem)
  770. return -ENOMEM;
  771. return 0;
  772. }
  773. static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
  774. {
  775. size_t compress_size;
  776. int ret;
  777. ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
  778. lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
  779. if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
  780. return -EINVAL;
  781. lzo_ctx->dst_len = compress_size;
  782. return 0;
  783. }
  784. static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
  785. {
  786. size_t dst_len;
  787. int ret;
  788. dst_len = lzo_ctx->dst_len;
  789. ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
  790. lzo_ctx->dst, &dst_len);
  791. if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
  792. return -EINVAL;
  793. return 0;
  794. }
  795. static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
  796. struct snd_soc_lzo_ctx *lzo_ctx)
  797. {
  798. int ret;
  799. lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
  800. lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
  801. if (!lzo_ctx->dst) {
  802. lzo_ctx->dst_len = 0;
  803. return -ENOMEM;
  804. }
  805. ret = snd_soc_lzo_compress(lzo_ctx);
  806. if (ret < 0)
  807. return ret;
  808. return 0;
  809. }
  810. static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
  811. struct snd_soc_lzo_ctx *lzo_ctx)
  812. {
  813. int ret;
  814. lzo_ctx->dst_len = lzo_ctx->decompressed_size;
  815. lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
  816. if (!lzo_ctx->dst) {
  817. lzo_ctx->dst_len = 0;
  818. return -ENOMEM;
  819. }
  820. ret = snd_soc_lzo_decompress(lzo_ctx);
  821. if (ret < 0)
  822. return ret;
  823. return 0;
  824. }
  825. static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
  826. unsigned int reg)
  827. {
  828. const struct snd_soc_codec_driver *codec_drv;
  829. codec_drv = codec->driver;
  830. return (reg * codec_drv->reg_word_size) /
  831. DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
  832. }
  833. static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
  834. unsigned int reg)
  835. {
  836. const struct snd_soc_codec_driver *codec_drv;
  837. codec_drv = codec->driver;
  838. return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
  839. codec_drv->reg_word_size);
  840. }
  841. static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
  842. {
  843. const struct snd_soc_codec_driver *codec_drv;
  844. codec_drv = codec->driver;
  845. return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
  846. }
  847. static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
  848. {
  849. struct snd_soc_lzo_ctx **lzo_blocks;
  850. unsigned int val;
  851. int i;
  852. int ret;
  853. lzo_blocks = codec->reg_cache;
  854. for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
  855. WARN_ON(codec->writable_register &&
  856. codec->writable_register(codec, i));
  857. ret = snd_soc_cache_read(codec, i, &val);
  858. if (ret)
  859. return ret;
  860. codec->cache_bypass = 1;
  861. ret = snd_soc_write(codec, i, val);
  862. codec->cache_bypass = 0;
  863. if (ret)
  864. return ret;
  865. dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
  866. i, val);
  867. }
  868. return 0;
  869. }
/*
 * Update one register value inside the LZO-compressed cache.
 *
 * The compressed block holding @reg is decompressed into a freshly
 * allocated buffer, the new value is patched in, and the block is
 * re-compressed into another fresh buffer. On any failure the original
 * compressed block (saved in tmp_dst/tmp_dst_len) is restored, so the
 * cache is never left in an inconsistent state.
 */
static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
		unsigned int reg, unsigned int value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block; on success ->dst is a new scratch buffer */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret < 0) {
		/* kfree(NULL) is fine if the allocation itself failed */
		kfree(lzo_block->dst);
		goto out;
	}

	/*
	 * write the new value to the cache; a non-zero return bails out
	 * with ret still 0 from the decompress step, i.e. it is reported
	 * as success. NOTE(review): presumably non-zero means the cached
	 * value already equals @value — confirm against
	 * snd_soc_set_cache_val earlier in this file.
	 */
	if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
				  codec->driver->reg_word_size)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block; on failure drop both temporary buffers */
	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg, lzo_block->sync_bmp);
	/* free the old compressed block and the decompressed scratch copy */
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;

out:
	/* restore the pointer and length of the original compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}
/*
 * Fetch the value of @reg from the LZO-compressed cache.
 *
 * The compressed block holding @reg is decompressed into a temporary
 * buffer, the value is read out, and the original compressed block is
 * restored. Always returns 0; if decompression fails, *value stays 0.
 */
static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
		unsigned int reg, unsigned int *value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	*value = 0;
	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block into a freshly allocated ->dst buffer */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
					       codec->driver->reg_word_size);

	/* drop the scratch buffer; kfree(NULL) is a no-op on alloc failure */
	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return 0;
}
  957. static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
  958. {
  959. struct snd_soc_lzo_ctx **lzo_blocks;
  960. int i, blkcount;
  961. lzo_blocks = codec->reg_cache;
  962. if (!lzo_blocks)
  963. return 0;
  964. blkcount = snd_soc_lzo_block_count();
  965. /*
  966. * the pointer to the bitmap used for syncing the cache
  967. * is shared amongst all lzo_blocks. Ensure it is freed
  968. * only once.
  969. */
  970. if (lzo_blocks[0])
  971. kfree(lzo_blocks[0]->sync_bmp);
  972. for (i = 0; i < blkcount; ++i) {
  973. if (lzo_blocks[i]) {
  974. kfree(lzo_blocks[i]->wmem);
  975. kfree(lzo_blocks[i]->dst);
  976. }
  977. /* each lzo_block is a pointer returned by kmalloc or NULL */
  978. kfree(lzo_blocks[i]);
  979. }
  980. kfree(lzo_blocks);
  981. codec->reg_cache = NULL;
  982. return 0;
  983. }
  984. static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
  985. {
  986. struct snd_soc_lzo_ctx **lzo_blocks;
  987. size_t bmp_size;
  988. const struct snd_soc_codec_driver *codec_drv;
  989. int ret, tofree, i, blksize, blkcount;
  990. const char *p, *end;
  991. unsigned long *sync_bmp;
  992. ret = 0;
  993. codec_drv = codec->driver;
  994. /*
  995. * If we have not been given a default register cache
  996. * then allocate a dummy zero-ed out region, compress it
  997. * and remember to free it afterwards.
  998. */
  999. tofree = 0;
  1000. if (!codec->reg_def_copy)
  1001. tofree = 1;
  1002. if (!codec->reg_def_copy) {
  1003. codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
  1004. if (!codec->reg_def_copy)
  1005. return -ENOMEM;
  1006. }
  1007. blkcount = snd_soc_lzo_block_count();
  1008. codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
  1009. GFP_KERNEL);
  1010. if (!codec->reg_cache) {
  1011. ret = -ENOMEM;
  1012. goto err_tofree;
  1013. }
  1014. lzo_blocks = codec->reg_cache;
  1015. /*
  1016. * allocate a bitmap to be used when syncing the cache with
  1017. * the hardware. Each time a register is modified, the corresponding
  1018. * bit is set in the bitmap, so we know that we have to sync
  1019. * that register.
  1020. */
  1021. bmp_size = codec_drv->reg_cache_size;
  1022. sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
  1023. GFP_KERNEL);
  1024. if (!sync_bmp) {
  1025. ret = -ENOMEM;
  1026. goto err;
  1027. }
  1028. bitmap_zero(sync_bmp, bmp_size);
  1029. /* allocate the lzo blocks and initialize them */
  1030. for (i = 0; i < blkcount; ++i) {
  1031. lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
  1032. GFP_KERNEL);
  1033. if (!lzo_blocks[i]) {
  1034. kfree(sync_bmp);
  1035. ret = -ENOMEM;
  1036. goto err;
  1037. }
  1038. lzo_blocks[i]->sync_bmp = sync_bmp;
  1039. lzo_blocks[i]->sync_bmp_nbits = bmp_size;
  1040. /* alloc the working space for the compressed block */
  1041. ret = snd_soc_lzo_prepare(lzo_blocks[i]);
  1042. if (ret < 0)
  1043. goto err;
  1044. }
  1045. blksize = snd_soc_lzo_get_blksize(codec);
  1046. p = codec->reg_def_copy;
  1047. end = codec->reg_def_copy + codec->reg_size;
  1048. /* compress the register map and fill the lzo blocks */
  1049. for (i = 0; i < blkcount; ++i, p += blksize) {
  1050. lzo_blocks[i]->src = p;
  1051. if (p + blksize > end)
  1052. lzo_blocks[i]->src_len = end - p;
  1053. else
  1054. lzo_blocks[i]->src_len = blksize;
  1055. ret = snd_soc_lzo_compress_cache_block(codec,
  1056. lzo_blocks[i]);
  1057. if (ret < 0)
  1058. goto err;
  1059. lzo_blocks[i]->decompressed_size =
  1060. lzo_blocks[i]->src_len;
  1061. }
  1062. if (tofree) {
  1063. kfree(codec->reg_def_copy);
  1064. codec->reg_def_copy = NULL;
  1065. }
  1066. return 0;
  1067. err:
  1068. snd_soc_cache_exit(codec);
  1069. err_tofree:
  1070. if (tofree) {
  1071. kfree(codec->reg_def_copy);
  1072. codec->reg_def_copy = NULL;
  1073. }
  1074. return ret;
  1075. }
  1076. #endif
  1077. static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
  1078. {
  1079. int i;
  1080. int ret;
  1081. const struct snd_soc_codec_driver *codec_drv;
  1082. unsigned int val;
  1083. codec_drv = codec->driver;
  1084. for (i = 0; i < codec_drv->reg_cache_size; ++i) {
  1085. WARN_ON(codec->writable_register &&
  1086. codec->writable_register(codec, i));
  1087. ret = snd_soc_cache_read(codec, i, &val);
  1088. if (ret)
  1089. return ret;
  1090. if (codec->reg_def_copy)
  1091. if (snd_soc_get_cache_val(codec->reg_def_copy,
  1092. i, codec_drv->reg_word_size) == val)
  1093. continue;
  1094. ret = snd_soc_write(codec, i, val);
  1095. if (ret)
  1096. return ret;
  1097. dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
  1098. i, val);
  1099. }
  1100. return 0;
  1101. }
  1102. static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
  1103. unsigned int reg, unsigned int value)
  1104. {
  1105. snd_soc_set_cache_val(codec->reg_cache, reg, value,
  1106. codec->driver->reg_word_size);
  1107. return 0;
  1108. }
  1109. static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
  1110. unsigned int reg, unsigned int *value)
  1111. {
  1112. *value = snd_soc_get_cache_val(codec->reg_cache, reg,
  1113. codec->driver->reg_word_size);
  1114. return 0;
  1115. }
  1116. static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
  1117. {
  1118. if (!codec->reg_cache)
  1119. return 0;
  1120. kfree(codec->reg_cache);
  1121. codec->reg_cache = NULL;
  1122. return 0;
  1123. }
  1124. static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
  1125. {
  1126. const struct snd_soc_codec_driver *codec_drv;
  1127. codec_drv = codec->driver;
  1128. if (codec->reg_def_copy)
  1129. codec->reg_cache = kmemdup(codec->reg_def_copy,
  1130. codec->reg_size, GFP_KERNEL);
  1131. else
  1132. codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
  1133. if (!codec->reg_cache)
  1134. return -ENOMEM;
  1135. return 0;
  1136. }
/*
 * An array of all supported compression types. snd_soc_cache_init()
 * matches codec->compress_type against the .id fields below.
 */
static const struct snd_soc_cache_ops cache_types[] = {
	/* Flat *must* be the first entry for fallback */
	{
		.id = SND_SOC_FLAT_COMPRESSION,
		.name = "flat",
		.init = snd_soc_flat_cache_init,
		.exit = snd_soc_flat_cache_exit,
		.read = snd_soc_flat_cache_read,
		.write = snd_soc_flat_cache_write,
		.sync = snd_soc_flat_cache_sync
	},
#ifdef CONFIG_SND_SOC_CACHE_LZO
	/* LZO-compressed cache, only available when enabled in Kconfig */
	{
		.id = SND_SOC_LZO_COMPRESSION,
		.name = "LZO",
		.init = snd_soc_lzo_cache_init,
		.exit = snd_soc_lzo_cache_exit,
		.read = snd_soc_lzo_cache_read,
		.write = snd_soc_lzo_cache_write,
		.sync = snd_soc_lzo_cache_sync
	},
#endif
	{
		.id = SND_SOC_RBTREE_COMPRESSION,
		.name = "rbtree",
		.init = snd_soc_rbtree_cache_init,
		.exit = snd_soc_rbtree_cache_exit,
		.read = snd_soc_rbtree_cache_read,
		.write = snd_soc_rbtree_cache_write,
		.sync = snd_soc_rbtree_cache_sync
	}
};
  1170. int snd_soc_cache_init(struct snd_soc_codec *codec)
  1171. {
  1172. int i;
  1173. for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
  1174. if (cache_types[i].id == codec->compress_type)
  1175. break;
  1176. /* Fall back to flat compression */
  1177. if (i == ARRAY_SIZE(cache_types)) {
  1178. dev_warn(codec->dev, "Could not match compress type: %d\n",
  1179. codec->compress_type);
  1180. i = 0;
  1181. }
  1182. mutex_init(&codec->cache_rw_mutex);
  1183. codec->cache_ops = &cache_types[i];
  1184. if (codec->cache_ops->init) {
  1185. if (codec->cache_ops->name)
  1186. dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
  1187. codec->cache_ops->name, codec->name);
  1188. return codec->cache_ops->init(codec);
  1189. }
  1190. return -ENOSYS;
  1191. }
  1192. /*
  1193. * NOTE: keep in mind that this function might be called
  1194. * multiple times.
  1195. */
  1196. int snd_soc_cache_exit(struct snd_soc_codec *codec)
  1197. {
  1198. if (codec->cache_ops && codec->cache_ops->exit) {
  1199. if (codec->cache_ops->name)
  1200. dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
  1201. codec->cache_ops->name, codec->name);
  1202. return codec->cache_ops->exit(codec);
  1203. }
  1204. return -ENOSYS;
  1205. }
  1206. /**
  1207. * snd_soc_cache_read: Fetch the value of a given register from the cache.
  1208. *
  1209. * @codec: CODEC to configure.
  1210. * @reg: The register index.
  1211. * @value: The value to be returned.
  1212. */
  1213. int snd_soc_cache_read(struct snd_soc_codec *codec,
  1214. unsigned int reg, unsigned int *value)
  1215. {
  1216. int ret;
  1217. mutex_lock(&codec->cache_rw_mutex);
  1218. if (value && codec->cache_ops && codec->cache_ops->read) {
  1219. ret = codec->cache_ops->read(codec, reg, value);
  1220. mutex_unlock(&codec->cache_rw_mutex);
  1221. return ret;
  1222. }
  1223. mutex_unlock(&codec->cache_rw_mutex);
  1224. return -ENOSYS;
  1225. }
  1226. EXPORT_SYMBOL_GPL(snd_soc_cache_read);
  1227. /**
  1228. * snd_soc_cache_write: Set the value of a given register in the cache.
  1229. *
  1230. * @codec: CODEC to configure.
  1231. * @reg: The register index.
  1232. * @value: The new register value.
  1233. */
  1234. int snd_soc_cache_write(struct snd_soc_codec *codec,
  1235. unsigned int reg, unsigned int value)
  1236. {
  1237. int ret;
  1238. mutex_lock(&codec->cache_rw_mutex);
  1239. if (codec->cache_ops && codec->cache_ops->write) {
  1240. ret = codec->cache_ops->write(codec, reg, value);
  1241. mutex_unlock(&codec->cache_rw_mutex);
  1242. return ret;
  1243. }
  1244. mutex_unlock(&codec->cache_rw_mutex);
  1245. return -ENOSYS;
  1246. }
  1247. EXPORT_SYMBOL_GPL(snd_soc_cache_write);
  1248. /**
  1249. * snd_soc_cache_sync: Sync the register cache with the hardware.
  1250. *
  1251. * @codec: CODEC to configure.
  1252. *
  1253. * Any registers that should not be synced should be marked as
  1254. * volatile. In general drivers can choose not to use the provided
  1255. * syncing functionality if they so require.
  1256. */
  1257. int snd_soc_cache_sync(struct snd_soc_codec *codec)
  1258. {
  1259. int ret;
  1260. const char *name;
  1261. if (!codec->cache_sync) {
  1262. return 0;
  1263. }
  1264. if (!codec->cache_ops || !codec->cache_ops->sync)
  1265. return -ENOSYS;
  1266. if (codec->cache_ops->name)
  1267. name = codec->cache_ops->name;
  1268. else
  1269. name = "unknown";
  1270. if (codec->cache_ops->name)
  1271. dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
  1272. codec->cache_ops->name, codec->name);
  1273. trace_snd_soc_cache_sync(codec, name, "start");
  1274. ret = codec->cache_ops->sync(codec);
  1275. if (!ret)
  1276. codec->cache_sync = 0;
  1277. trace_snd_soc_cache_sync(codec, name, "end");
  1278. return ret;
  1279. }
  1280. EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
  1281. static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
  1282. unsigned int reg)
  1283. {
  1284. const struct snd_soc_codec_driver *codec_drv;
  1285. unsigned int min, max, index;
  1286. codec_drv = codec->driver;
  1287. min = 0;
  1288. max = codec_drv->reg_access_size - 1;
  1289. do {
  1290. index = (min + max) / 2;
  1291. if (codec_drv->reg_access_default[index].reg == reg)
  1292. return index;
  1293. if (codec_drv->reg_access_default[index].reg < reg)
  1294. min = index + 1;
  1295. else
  1296. max = index;
  1297. } while (min <= max);
  1298. return -1;
  1299. }
  1300. int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
  1301. unsigned int reg)
  1302. {
  1303. int index;
  1304. if (reg >= codec->driver->reg_cache_size)
  1305. return 1;
  1306. index = snd_soc_get_reg_access_index(codec, reg);
  1307. if (index < 0)
  1308. return 0;
  1309. return codec->driver->reg_access_default[index].vol;
  1310. }
  1311. EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
  1312. int snd_soc_default_readable_register(struct snd_soc_codec *codec,
  1313. unsigned int reg)
  1314. {
  1315. int index;
  1316. if (reg >= codec->driver->reg_cache_size)
  1317. return 1;
  1318. index = snd_soc_get_reg_access_index(codec, reg);
  1319. if (index < 0)
  1320. return 0;
  1321. return codec->driver->reg_access_default[index].read;
  1322. }
  1323. EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
  1324. int snd_soc_default_writable_register(struct snd_soc_codec *codec,
  1325. unsigned int reg)
  1326. {
  1327. int index;
  1328. if (reg >= codec->driver->reg_cache_size)
  1329. return 1;
  1330. index = snd_soc_get_reg_access_index(codec, reg);
  1331. if (index < 0)
  1332. return 0;
  1333. return codec->driver->reg_access_default[index].write;
  1334. }
  1335. EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);