regcache-lzo.c

/*
 * Register cache access API - LZO caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/lzo.h>

#include "internal.h"

static int regcache_lzo_exit(struct regmap *map);

struct regcache_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};
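
/*
 * The raw register cache is split into a fixed number of blocks, each
 * compressed independently with LZO1X.  Using more blocks means less
 * data has to be decompressed per access, at the likely cost of a
 * lower overall compression ratio.
 */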
#define LZO_BLOCK_NUM 8

static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}
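
/*
 * Allocate the scratch memory that lzo1x_1_compress() needs when
 * compressing a single block.
 */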
static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}
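
/*
 * Compress src/src_len into dst; on success dst_len is updated to the
 * compressed size.  Fails if the output does not fit in dst_len bytes.
 */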
static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}
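
/*
 * Decompress src/src_len into dst, expecting exactly dst_len bytes of
 * output; anything else is treated as corruption and rejected.
 */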
static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}
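
/*
 * Allocate a worst-case sized output buffer and compress one cache
 * block into it.
 */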
static int regcache_lzo_compress_cache_block(struct regmap *map,
					     struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}
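
/*
 * Allocate a buffer of the block's known decompressed size and inflate
 * the compressed block into it.
 */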
static int regcache_lzo_decompress_cache_block(struct regmap *map,
					       struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}
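
/*
 * Block geometry helpers: the raw cache (cache_size_raw bytes) is
 * divided into regcache_lzo_block_count() blocks.
 * regcache_lzo_get_blkindex() maps a register number to the block that
 * holds it, regcache_lzo_get_blkpos() to the register's offset within
 * the decompressed block, and regcache_lzo_get_blksize() returns the
 * uncompressed size of one block in bytes.
 */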
static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return (reg * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}

static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return reg % (DIV_ROUND_UP(map->cache_size_raw,
				   regcache_lzo_block_count(map)) /
		      map->cache_word_size);
}

static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}
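
/*
 * Build the compressed cache: allocate the per-block contexts and the
 * shared sync bitmap, then compress the raw register defaults block by
 * block.
 */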
static int regcache_lzo_init(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	int ret, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;

	blkcount = regcache_lzo_block_count(map);
	map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
			     GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;
	lzo_blocks = map->cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware. Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = map->num_reg_defaults_raw;
	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
			   GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}
	bitmap_zero(sync_bmp, bmp_size);

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; i++) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			kfree(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = regcache_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = regcache_lzo_get_blksize(map);
	p = map->reg_defaults_raw;
	end = map->reg_defaults_raw + map->cache_size_raw;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; i++, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = regcache_lzo_compress_cache_block(map,
							lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	return 0;

err:
	regcache_lzo_exit(map);
	return ret;
}
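
/*
 * Free everything regcache_lzo_init() allocated; also used as the
 * error path during initialisation, so it tolerates partially
 * initialised state.
 */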
static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks. Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		kfree(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}

	kfree(lzo_blocks);
	map->cache = NULL;
	return 0;
}
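
/*
 * Read one register from the cache: decompress the block that holds it
 * into a temporary buffer, fetch the value, then free the buffer and
 * restore the compressed block.
 */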
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	/* size of the compressed block */
	blksize = regcache_lzo_get_blksize(map);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(lzo_block->dst, blkpos,
					  map->cache_word_size);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}
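
/*
 * Update one register in the cache: decompress the containing block,
 * patch the value, recompress the block and mark the register dirty in
 * the shared sync bitmap.  If regcache_set_val() reports that the value
 * is already cached, the recompression is skipped and the block is left
 * untouched.
 */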
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	/* size of the compressed block */
	blksize = regcache_lzo_get_blksize(map);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(lzo_block->dst, blkpos, value,
			     map->cache_word_size)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;

out:
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}
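
/*
 * Write back every register whose bit is set in the shared sync bitmap
 * and which lies within [min, max], skipping registers that still hold
 * their hardware default.
 */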
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = map->cache;
	i = min;
	for_each_set_bit_cont(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default? If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = 1;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return 0;
}
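
/*
 * Cache operations exported to the regcache core.  A driver does not
 * call these directly; it would typically request this backend through
 * its regmap configuration, roughly (illustrative sketch, the struct
 * name and field values are assumptions):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.cache_type = REGCACHE_COMPRESSED,
 *	};
 *
 * The regcache core then selects regcache_lzo_ops by matching .type
 * against the requested cache_type.
 */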
struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};