dm-cache-policy-cleaner.c

/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * A writeback cache policy that flushes dirty cache blocks out to the
 * origin device.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "cache cleaner"

/* Cache entry struct. */
struct wb_cache_entry {
        struct list_head list;
        struct hlist_node hlist;

        dm_oblock_t oblock;
        dm_cblock_t cblock;
        bool dirty:1;
        bool pending:1;
};

struct hash {
        struct hlist_head *table;
        dm_block_t hash_bits;
        unsigned nr_buckets;
};

struct policy {
        struct dm_cache_policy policy;
        spinlock_t lock;

        struct list_head free;
        struct list_head clean;
        struct list_head clean_pending;
        struct list_head dirty;

        /*
         * We know exactly how many cblocks will be needed,
         * so we can allocate them up front.
         */
        dm_cblock_t cache_size, nr_cblocks_allocated;
        struct wb_cache_entry *cblocks;
        struct hash chash;
};
/*----------------------------------------------------------------------------*/

/*
 * Low-level functions.
 */
static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}
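
/*
 * For example (illustrative values only): next_power(100, 16) rounds up
 * to 128, while next_power(3, 16) clamps to the minimum and returns 16.
 */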
static struct policy *to_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct policy, policy);
}

static struct list_head *list_pop(struct list_head *q)
{
        struct list_head *r = q->next;

        list_del(r);

        return r;
}
/*----------------------------------------------------------------------------*/

/* Allocate/free various resources. */
static int alloc_hash(struct hash *hash, unsigned elts)
{
        hash->nr_buckets = next_power(elts >> 4, 16);
        hash->hash_bits = ffs(hash->nr_buckets) - 1;
        hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

        return hash->table ? 0 : -ENOMEM;
}
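
/*
 * Sizing sketch (illustrative numbers): the table gets roughly one
 * bucket per 16 entries, with a floor of 16 buckets.  For a cache of
 * 4096 blocks: nr_buckets = next_power(4096 >> 4, 16) = 256 and
 * hash_bits = ffs(256) - 1 = 8, i.e. hash_64() produces 8-bit bucket
 * indices.
 */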
static void free_hash(struct hash *hash)
{
        vfree(hash->table);
}

static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
{
        int r = -ENOMEM;

        p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
        if (p->cblocks) {
                unsigned u = from_cblock(cache_size);

                while (u--)
                        list_add(&p->cblocks[u].list, &p->free);

                p->nr_cblocks_allocated = 0;

                /* Cache entries hash. */
                r = alloc_hash(&p->chash, from_cblock(cache_size));
                if (r)
                        vfree(p->cblocks);
        }

        return r;
}

static void free_cache_blocks_and_hash(struct policy *p)
{
        free_hash(&p->chash);
        vfree(p->cblocks);
}
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
        struct wb_cache_entry *e;

        BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));

        e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
        p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);

        return e;
}
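
/*
 * Note: every entry is preallocated in wb_create() and parked on the
 * free list, so popping one here never sleeps and never fails; the
 * BUG_ON() catches the bookkeeping error of allocating more entries
 * than the cache has blocks.
 */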
/*----------------------------------------------------------------------------*/

/* Hash functions (lookup, insert, remove). */
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
        struct hash *hash = &p->chash;
        unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
        struct wb_cache_entry *cur;
        struct hlist_head *bucket = &hash->table[h];

        hlist_for_each_entry(cur, bucket, hlist) {
                if (cur->oblock == oblock) {
                        /* Move to the front of the bucket for faster subsequent lookups. */
                        hlist_del(&cur->hlist);
                        hlist_add_head(&cur->hlist, bucket);
                        return cur;
                }
        }

        return NULL;
}
static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);

        hlist_add_head(&e->hlist, &p->chash.table[h]);
}

static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
        hlist_del(&e->hlist);
}
/* Public interface (see dm-cache-policy.h). */
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        result->op = POLICY_MISS;

        if (can_block)
                spin_lock_irqsave(&p->lock, flags);
        else if (!spin_trylock_irqsave(&p->lock, flags))
                return -EWOULDBLOCK;

        e = lookup_cache_entry(p, oblock);
        if (e) {
                result->op = POLICY_HIT;
                result->cblock = e->cblock;
        }

        spin_unlock_irqrestore(&p->lock, flags);

        return 0;
}
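
/*
 * Usage sketch (hypothetical caller, for illustration): the core cache
 * target drives the policy through the policy_*() wrappers declared in
 * dm-cache-policy.h, roughly:
 *
 *      struct policy_result result;
 *      int r = policy_map(p, oblock, true, false, false, bio, &result);
 *
 *      if (!r && result.op == POLICY_HIT)
 *              remap the bio to result.cblock on the cache device;
 *
 * Since wb_map() only ever reports POLICY_HIT or POLICY_MISS, the
 * cleaner never asks the core to promote new blocks into the cache.
 */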
static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
        int r;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        if (!spin_trylock_irqsave(&p->lock, flags))
                return -EWOULDBLOCK;

        e = lookup_cache_entry(p, oblock);
        if (e) {
                *cblock = e->cblock;
                r = 0;
        } else
                r = -ENOENT;

        spin_unlock_irqrestore(&p->lock, flags);

        return r;
}
static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;

        e = lookup_cache_entry(p, oblock);
        BUG_ON(!e);

        if (set) {
                if (!e->dirty) {
                        e->dirty = true;
                        list_move(&e->list, &p->dirty);
                }
        } else {
                if (e->dirty) {
                        e->pending = false;
                        e->dirty = false;
                        list_move(&e->list, &p->clean);
                }
        }
}

static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __set_clear_dirty(pe, oblock, true);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __set_clear_dirty(pe, oblock, false);
        spin_unlock_irqrestore(&p->lock, flags);
}
static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
{
        insert_cache_hash_entry(p, e);
        if (e->dirty)
                list_add(&e->list, &p->dirty);
        else
                list_add(&e->list, &p->clean);
}

static int wb_load_mapping(struct dm_cache_policy *pe,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
{
        int r;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e = alloc_cache_entry(p);

        if (e) {
                e->cblock = cblock;
                e->oblock = oblock;
                e->dirty = false; /* blocks default to clean */
                add_cache_entry(p, e);
                r = 0;
        } else
                r = -ENOMEM;

        return r;
}
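
/*
 * load_mapping is called once per existing mapping when the core target
 * reads the metadata back in (e.g. on table load), rebuilding the
 * in-core hash and clean/dirty lists.  hint/hint_valid are ignored
 * because this policy declares hint_size = 0.
 */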
static void wb_destroy(struct dm_cache_policy *pe)
{
        struct policy *p = to_policy(pe);

        free_cache_blocks_and_hash(p);
        kfree(p);
}

static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
        struct wb_cache_entry *r = lookup_cache_entry(p, oblock);

        BUG_ON(!r);

        remove_cache_hash_entry(r);
        list_del(&r->list);

        return r;
}
static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        e = __wb_force_remove_mapping(p, oblock);
        list_add_tail(&e->list, &p->free);
        BUG_ON(!from_cblock(p->nr_cblocks_allocated));
        p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_force_mapping(struct dm_cache_policy *pe,
                             dm_oblock_t current_oblock, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        e = __wb_force_remove_mapping(p, current_oblock);
        e->oblock = oblock;
        add_cache_entry(p, e);
        spin_unlock_irqrestore(&p->lock, flags);
}

static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
{
        struct list_head *l;
        struct wb_cache_entry *r;

        if (list_empty(&p->dirty))
                return NULL;

        l = list_pop(&p->dirty);
        r = container_of(l, struct wb_cache_entry, list);
        list_add(l, &p->clean_pending);

        return r;
}
static int wb_writeback_work(struct dm_cache_policy *pe,
                             dm_oblock_t *oblock,
                             dm_cblock_t *cblock)
{
        int r = -ENOENT;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);

        e = get_next_dirty_entry(p);
        if (e) {
                *oblock = e->oblock;
                *cblock = e->cblock;
                r = 0;
        }

        spin_unlock_irqrestore(&p->lock, flags);

        return r;
}
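
/*
 * Sketch of the expected writeback loop (hypothetical caller): the core
 * target keeps asking for work until -ENOENT, copies each block back to
 * the origin and then clears the dirty flag:
 *
 *      while (!policy_writeback_work(p, &oblock, &cblock)) {
 *              copy cblock back to oblock on the origin device;
 *              policy_clear_dirty(p, oblock);
 *      }
 *
 * Entries handed out here sit on clean_pending until the dirty bit is
 * cleared (or set again) via __set_clear_dirty().
 */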
static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
{
        return to_policy(pe)->nr_cblocks_allocated;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct policy *p)
{
        p->policy.destroy = wb_destroy;
        p->policy.map = wb_map;
        p->policy.lookup = wb_lookup;
        p->policy.set_dirty = wb_set_dirty;
        p->policy.clear_dirty = wb_clear_dirty;
        p->policy.load_mapping = wb_load_mapping;
        p->policy.walk_mappings = NULL;
        p->policy.remove_mapping = wb_remove_mapping;
        p->policy.writeback_work = wb_writeback_work;
        p->policy.force_mapping = wb_force_mapping;
        p->policy.residency = wb_residency;
        p->policy.tick = NULL;
}
static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        int r;
        struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;

        init_policy_functions(p);
        INIT_LIST_HEAD(&p->free);
        INIT_LIST_HEAD(&p->clean);
        INIT_LIST_HEAD(&p->clean_pending);
        INIT_LIST_HEAD(&p->dirty);

        p->cache_size = cache_size;
        spin_lock_init(&p->lock);

        /* Allocate cache entry structs and add them to free list. */
        r = alloc_cache_blocks_with_hash(p, cache_size);
        if (!r)
                return &p->policy;

        kfree(p);

        return NULL;
}
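
/*
 * Userspace selects this policy via the cache target's table line.  An
 * illustrative example (device names and sizes are made up):
 *
 *      dmsetup create cleaned --table "0 409600 cache /dev/mapper/meta \
 *              /dev/mapper/ssd /dev/mapper/origin 512 0 cleaner 0"
 *
 * i.e. <metadata dev> <cache dev> <origin dev> <block size>, zero
 * feature args, the "cleaner" policy and zero policy args.
 */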
/*----------------------------------------------------------------------------*/

static struct dm_cache_policy_type wb_policy_type = {
        .name = "cleaner",
        .version = {1, 0, 0},
        .hint_size = 0,
        .owner = THIS_MODULE,
        .create = wb_create
};
static int __init wb_init(void)
{
        int r = dm_cache_policy_register(&wb_policy_type);

        if (r < 0)
                DMERR("register failed %d", r);
        else
                DMINFO("version %u.%u.%u loaded",
                       wb_policy_type.version[0],
                       wb_policy_type.version[1],
                       wb_policy_type.version[2]);

        return r;
}

static void __exit wb_exit(void)
{
        dm_cache_policy_unregister(&wb_policy_type);
}

module_init(wb_init);
module_exit(wb_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");