/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));

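/*
 * The cache context holds the root of the rbtree plus a pointer to the
 * most recently used node, which acts as a one-entry lookup cache for
 * the common case of repeated accesses within the same register block.
 */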
struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

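/*
 * Accessors for a single register within a node's block. Registers are
 * addressed by index into the block rather than by absolute register
 * number; callers convert with (reg - base_reg) / map->reg_stride.
 * Setting a value also marks it in the cache_present bitmap.
 */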
static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

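/*
 * Find the node whose register range contains 'reg'. The cached node is
 * checked first; on a miss we do a standard O(log n) binary search down
 * the tree, keyed on each node's [base_reg, top_reg] range, and remember
 * the node we find for next time.
 */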
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							   unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

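/*
 * Link a new node into the tree using the usual rb_link_node() /
 * rb_insert_color() idiom: walk down to find the parent and link point,
 * then insert and rebalance. Returns 0 if the node's base register is
 * already covered by an existing node, 1 on successful insertion.
 */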
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

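/*
 * When debugfs is available, expose an "rbtree" file that dumps each
 * node's register range plus a summary of node count, register count
 * and memory consumption, which helps when tuning cache block sizes.
 */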
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

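/*
 * Allocate the cache context and seed it with the register defaults
 * provided in the regmap config, writing each default through the
 * normal cache write path so that blocks are created on demand.
 */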
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

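/*
 * Tear down the cache: erase and free every node together with its
 * value block and present bitmap, then free the context itself. Safe
 * to call more than once since map->cache is NULLed on the way out.
 */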
static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

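/*
 * A cache read hits only if a node covers the register and its present
 * bit is set; otherwise -ENOENT signals that the value is not cached.
 */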
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

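/*
 * Grow an existing node so that its block spans [base_reg, top_reg] and
 * store 'value' for 'reg' within it. Both the value block and the
 * present bitmap are krealloc()ed; when the block is extended downwards
 * (pos == 0) the existing values are memmove()d up by 'offset' slots
 * and the present bits shifted to match before the base register is
 * updated.
 */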
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	present = krealloc(rbnode->cache_present,
			   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
	if (!present) {
		kfree(blk);
		return -ENOMEM;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

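/*
 * Allocate a fresh node for 'reg'. If a read table is defined, size the
 * block to cover the whole yes-range containing the register, on the
 * assumption that neighbouring registers will be cached soon anyway;
 * otherwise start with a single-register block.
 */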
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
				GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
		sizeof(*rbnode->cache_present), GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

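/*
 * Cache a write. Three cases, in order: the register falls inside an
 * existing node and is simply stored; it lies within max_dist of an
 * existing node, in which case that node is grown to absorb it; or a
 * new node is allocated and inserted into the tree. max_dist bounds
 * how far a block may be stretched so that padding out a block does
 * not cost more memory than allocating a fresh node would.
 */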
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
				&base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				new_base_reg = min(reg, base_reg);
				new_top_reg = max(reg, top_reg);
			} else {
				continue;
			}

			ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode_tmp;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

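/*
 * Write back cached contents in [min, max] to the hardware. Each
 * overlapping node is clipped to the requested range and handed to
 * regcache_sync_block(), which uses the present bitmap to skip
 * uncached registers. Iteration stops early once nodes start past
 * 'max', since rb_first()/rb_next() walk the tree in ascending
 * register order.
 */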
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
			&top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

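/*
 * Drop cached values in [min, max] by clearing their present bits. The
 * blocks themselves stay allocated; subsequent reads of dropped
 * registers return -ENOENT until they are written again.
 */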
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
			&top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

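/* Entry points used by the regcache core for REGCACHE_RBTREE maps */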
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};