dm-region-hash.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define DM_MSG_PREFIX "region hash"

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions. Each
 * region can be in one of three states: clean, dirty,
 * nosync. There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table, a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull. dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery. dm_rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered. dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions. This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
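
/*
 * Rough sketch of how a client such as dm-raid1 is expected to drive this
 * interface (hypothetical helper names, error handling omitted):
 *
 *	// write path, from the client's worker thread:
 *	dm_rh_inc_pending(rh, &writes);		// mark the regions dirty
 *	dm_rh_flush(rh);			// commit the dirty log
 *	issue_writes(&writes);			// client's own helper
 *	// ...and from each write's completion path:
 *	dm_rh_dec(rh, dm_rh_bio_to_region(rh, bio));
 *
 *	// recovery path, also from the worker thread:
 *	dm_rh_recovery_prepare(rh);
 *	while ((reg = dm_rh_recovery_start(rh)))
 *		copy_region(reg);		// client's kcopyd job; its
 *						// completion calls
 *						// dm_rh_recovery_end()
 *	dm_rh_update_states(rh, errors_handled);
 */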

struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule bio writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wakeup callers worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wakeup callers recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
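
/*
 * Example: with region_size = 1024 sectors, region_shift is 10, so a bio
 * at sector 5000 of a target beginning at sector 0 falls in region 4
 * (5000 >> 10), which in turn starts at sector 4096 (4 << 10).
 */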

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64

struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
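
/*
 * For illustration: a mirror covering 1<<20 regions gets
 * max_buckets = (1<<20) >> 6 = 16384 and ends up with nr_buckets = 8192
 * (the largest power of two strictly below max_buckets, floored at 64),
 * so mask = 8191. A hypothetical caller looks roughly like:
 *
 *	rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 *				   wakeup_all_recovery_waiters,
 *				   ti->begin, MAX_RECOVERY,
 *				   dirty_log, region_size, nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 */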

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	if (rh->region_pool)
		mempool_destroy(rh->region_pool);

	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
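
/*
 * Multiplicative hash: scale the region number by a large constant,
 * drop the low bits of the product via the shift, then mask down to the
 * table size, which spreads consecutive region numbers across buckets.
 */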
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}
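
/*
 * Look a region up under the read lock; if it is missing, drop the read
 * lock, allocate and insert it under the write lock (re-checking for a
 * racing insert), then retake the read lock before returning. Callers
 * therefore only ever need the read side of hash_lock.
 */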
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 * @done
 * @error
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh,
		       struct bio *bio, unsigned done, int error)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In any of these cases the region must not be on a list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	bio_endio(bio, error);
	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
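
/*
 * Pull every region sitting on the clean, recovered and failed-recovered
 * lists out of the hash, tell the dirty log about each one, dispatch any
 * delayed bios for recovered regions, and finally flush the log. Runs in
 * process context, typically from the client's worker thread.
 */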
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);
		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);
		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);
		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
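
/*
 * Account one more pending write against a region. The first write to a
 * clean region pulls it off the clean list, flips it to DM_RH_DIRTY and
 * marks it in the dirty log; further writes only bump the pending count.
 */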
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for the
		 * next action. At this point, the region is not yet connected
		 * to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region must be kept off
		 * the clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Return a quiesced region ready for recovery, or NULL if none is waiting.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = DM_RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);
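
/*
 * Recovery throttling: 'recovery_count' is a counting semaphore that
 * dm_rh_start_recovery() loads with max_recovery permits. Each region
 * being recovered holds one permit, and dm_rh_stop_recovery() reclaims
 * all of them, so it blocks until every in-flight recovery has finished.
 */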
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");