dm-bio-prison.c 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396
  1. /*
  2. * Copyright (C) 2012 Red Hat, Inc.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm.h"
  7. #include "dm-bio-prison.h"
  8. #include <linux/spinlock.h>
  9. #include <linux/mempool.h>
  10. #include <linux/module.h>
  11. #include <linux/slab.h>
  12. /*----------------------------------------------------------------*/
/*
 * A prison is a hash table of cells keyed by dm_cell_key; the bucket
 * array is allocated inline, immediately after this struct.
 */
struct dm_bio_prison {
	spinlock_t lock;	/* protects cells[] and every cell's bio list */
	mempool_t *cell_pool;	/* backs dm_bio_prison_alloc_cell/free_cell */

	unsigned nr_buckets;	/* power of two (see calc_nr_buckets) */
	unsigned hash_mask;	/* nr_buckets - 1 */
	struct hlist_head *cells;	/* points just past this struct */
};
  20. /*----------------------------------------------------------------*/
  21. static uint32_t calc_nr_buckets(unsigned nr_cells)
  22. {
  23. uint32_t n = 128;
  24. nr_cells /= 4;
  25. nr_cells = min(nr_cells, 8192u);
  26. while (n < nr_cells)
  27. n <<= 1;
  28. return n;
  29. }
  30. static struct kmem_cache *_cell_cache;
  31. /*
  32. * @nr_cells should be the number of cells you want in use _concurrently_.
  33. * Don't confuse it with the number of distinct keys.
  34. */
  35. struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
  36. {
  37. unsigned i;
  38. uint32_t nr_buckets = calc_nr_buckets(nr_cells);
  39. size_t len = sizeof(struct dm_bio_prison) +
  40. (sizeof(struct hlist_head) * nr_buckets);
  41. struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
  42. if (!prison)
  43. return NULL;
  44. spin_lock_init(&prison->lock);
  45. prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
  46. if (!prison->cell_pool) {
  47. kfree(prison);
  48. return NULL;
  49. }
  50. prison->nr_buckets = nr_buckets;
  51. prison->hash_mask = nr_buckets - 1;
  52. prison->cells = (struct hlist_head *) (prison + 1);
  53. for (i = 0; i < nr_buckets; i++)
  54. INIT_HLIST_HEAD(prison->cells + i);
  55. return prison;
  56. }
  57. EXPORT_SYMBOL_GPL(dm_bio_prison_create);
/*
 * Tear down a prison created by dm_bio_prison_create().  All cells must
 * already have been released; the bucket array is freed together with
 * the prison since it shares its allocation.
 */
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
/*
 * Allocate a cell for later use with dm_bio_detain()/dm_get_cell().
 * May return NULL depending on @gfp (mempool semantics).
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
/*
 * Return an unused (or released) cell to the prison's pool.
 */
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
  75. static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
  76. {
  77. const unsigned long BIG_PRIME = 4294967291UL;
  78. uint64_t hash = key->block * BIG_PRIME;
  79. return (uint32_t) (hash & prison->hash_mask);
  80. }
  81. static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
  82. {
  83. return (lhs->virtual == rhs->virtual) &&
  84. (lhs->dev == rhs->dev) &&
  85. (lhs->block == rhs->block);
  86. }
/*
 * Scan one hash bucket for a cell whose key matches @key.
 * Returns the cell, or NULL if no occupant holds that key.
 * Caller must hold the prison lock.
 */
static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
						  struct dm_cell_key *key)
{
	struct dm_bio_prison_cell *cell;

	hlist_for_each_entry(cell, bucket, list)
		if (keys_equal(&cell->key, key))
			return cell;

	return NULL;
}
  96. static void __setup_new_cell(struct dm_bio_prison *prison,
  97. struct dm_cell_key *key,
  98. struct bio *holder,
  99. uint32_t hash,
  100. struct dm_bio_prison_cell *cell)
  101. {
  102. memcpy(&cell->key, key, sizeof(cell->key));
  103. cell->holder = holder;
  104. bio_list_init(&cell->bios);
  105. hlist_add_head(&cell->list, prison->cells + hash);
  106. }
  107. static int __bio_detain(struct dm_bio_prison *prison,
  108. struct dm_cell_key *key,
  109. struct bio *inmate,
  110. struct dm_bio_prison_cell *cell_prealloc,
  111. struct dm_bio_prison_cell **cell_result)
  112. {
  113. uint32_t hash = hash_key(prison, key);
  114. struct dm_bio_prison_cell *cell;
  115. cell = __search_bucket(prison->cells + hash, key);
  116. if (cell) {
  117. if (inmate)
  118. bio_list_add(&cell->bios, inmate);
  119. *cell_result = cell;
  120. return 1;
  121. }
  122. __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
  123. *cell_result = cell_prealloc;
  124. return 0;
  125. }
/*
 * Locked wrapper around __bio_detain(); see that function for the
 * return-value contract.  IRQ-safe so it can run from bio completion
 * context.
 */
static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
/*
 * Detain @inmate under @key.  Returns 0 if @inmate became the holder of
 * a new cell (built from @cell_prealloc), 1 if it was queued behind an
 * existing holder.  *cell_result is always set to the occupying cell.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
/*
 * Like dm_bio_detain() but with no inmate bio: grabs (or finds) the cell
 * for @key without queueing anything.  Returns 0 if the cell was newly
 * created from @cell_prealloc, 1 if it already existed.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
  156. /*
  157. * @inmates must have been initialised prior to this call
  158. */
  159. static void __cell_release(struct dm_bio_prison_cell *cell,
  160. struct bio_list *inmates)
  161. {
  162. hlist_del(&cell->list);
  163. if (inmates) {
  164. if (cell->holder)
  165. bio_list_add(inmates, cell->holder);
  166. bio_list_merge(inmates, &cell->bios);
  167. }
  168. }
/*
 * Release @cell, transferring its holder and queued bios onto @bios
 * (which must already be initialised; may be NULL to discard the list).
 * The cell memory itself is not freed here.
 */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
 * Sometimes we don't want the holder, just the additional bios.
 * Unlinks the cell and merges only its queued inmates onto @inmates;
 * the holder bio is left for the caller to deal with.
 * Caller must hold the prison lock.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);
}
/*
 * Release @cell but keep the holder out of the returned list: only the
 * bios queued behind the holder are moved onto @inmates (which must be
 * initialised by the caller).
 */
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
  198. void dm_cell_error(struct dm_bio_prison *prison,
  199. struct dm_bio_prison_cell *cell)
  200. {
  201. struct bio_list bios;
  202. struct bio *bio;
  203. unsigned long flags;
  204. bio_list_init(&bios);
  205. spin_lock_irqsave(&prison->lock, flags);
  206. __cell_release(cell, &bios);
  207. spin_unlock_irqrestore(&prison->lock, flags);
  208. while ((bio = bio_list_pop(&bios)))
  209. bio_io_error(bio);
  210. }
  211. EXPORT_SYMBOL_GPL(dm_cell_error);
  212. /*----------------------------------------------------------------*/
#define DEFERRED_SET_SIZE 64

/* One slot in the deferred set's ring. */
struct dm_deferred_entry {
	struct dm_deferred_set *ds;	/* back pointer to the owning set */
	unsigned count;			/* outstanding inc's against this slot */
	struct list_head work_items;	/* work parked until older slots drain */
};

/*
 * A ring of entries.  New references are charged to current_entry;
 * the sweeper trails behind, draining work from slots whose count has
 * dropped to zero.
 */
struct dm_deferred_set {
	spinlock_t lock;	/* protects everything below */
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
  225. struct dm_deferred_set *dm_deferred_set_create(void)
  226. {
  227. int i;
  228. struct dm_deferred_set *ds;
  229. ds = kmalloc(sizeof(*ds), GFP_KERNEL);
  230. if (!ds)
  231. return NULL;
  232. spin_lock_init(&ds->lock);
  233. ds->current_entry = 0;
  234. ds->sweeper = 0;
  235. for (i = 0; i < DEFERRED_SET_SIZE; i++) {
  236. ds->entries[i].ds = ds;
  237. ds->entries[i].count = 0;
  238. INIT_LIST_HEAD(&ds->entries[i].work_items);
  239. }
  240. return ds;
  241. }
  242. EXPORT_SYMBOL_GPL(dm_deferred_set_create);
/*
 * Free a deferred set.  All entries are expected to be drained; any
 * still-parked work items are simply dropped with the allocation.
 */
void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);
  248. struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
  249. {
  250. unsigned long flags;
  251. struct dm_deferred_entry *entry;
  252. spin_lock_irqsave(&ds->lock, flags);
  253. entry = ds->entries + ds->current_entry;
  254. entry->count++;
  255. spin_unlock_irqrestore(&ds->lock, flags);
  256. return entry;
  257. }
  258. EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
/* Next slot in the ring, wrapping at DEFERRED_SET_SIZE. */
static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}
/*
 * Advance the sweeper over every fully-drained entry (count == 0),
 * splicing its parked work items onto @head.  If the sweeper catches up
 * with current_entry and that entry is drained too, its work is also
 * released (without advancing past it).  Caller must hold ds->lock.
 */
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}
/*
 * Drop a reference taken with dm_deferred_entry_inc().  Any work freed
 * up by this decrement is spliced onto @head for the caller to run
 * after the lock is released.
 */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);	/* more decs than incs is a caller bug */
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
/*
 * Returns 1 if deferred or 0 if no pending items to delay job.
 *
 * With the set fully drained (sweeper has caught current_entry and it
 * holds no references) the work need not wait at all.  Otherwise the
 * work is parked on the current entry, and the ring advances to the
 * next slot if that slot has fully drained, so new inc's are charged
 * to a fresh entry.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
  305. /*----------------------------------------------------------------*/
/*
 * Module init: create the slab cache that backs every prison's cell
 * mempool.  Returns -ENOMEM if the cache cannot be created.
 */
static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}
/*
 * Module exit: destroy the shared cell cache.  All prisons (and hence
 * all mempools drawing on the cache) must be gone by now.
 */
static void __exit dm_bio_prison_exit(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}
/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");