dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"

static struct workqueue_struct *_kmirrord_wq;
static struct work_struct _kmirrord_work;
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

static inline void wake(void)
{
	queue_work(_kmirrord_wq, &_kmirrord_work);
}
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'bhs_delayed' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
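/*
 * An informal sketch of the region state machine, as implemented by
 * rh_inc(), rh_dec(), __rh_recovery_prepare() and rh_update_states()
 * below:
 *
 *   RH_CLEAN      -- write io arrives  -->  RH_DIRTY
 *   RH_DIRTY      -- last pending io   -->  RH_CLEAN (via clean_regions)
 *   RH_NOSYNC     -- chosen for resync -->  RH_RECOVERING
 *   RH_RECOVERING -- kcopyd finishes   -->  dropped from the hash,
 *                                           region now in sync
 */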
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror;	/* Default mirror */

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
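/*
 * Both conversions rely on region_size being a power of two, so a shift
 * by region_shift (== log2(region_size)) replaces a divide or multiply.
 * E.g. with region_size = 1024 sectors, region_shift = 10: a bio at
 * sector 5000 of the target falls in region 5000 >> 10 = 4.
 */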
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}
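/*
 * A worked example of the bucket sizing above: for nr_regions = 100000,
 * max_buckets = 100000 >> 6 = 1562; the loop doubles 128 up to 2048
 * (the first power of two >= 1562) and the final shift settles on 1024
 * buckets, i.e. roughly one bucket per hundred regions.  For small
 * devices the loop never runs and the result bottoms out at 64 buckets.
 */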
static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}
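/*
 * rh_hash() is a multiplicative hash: the large odd constant scrambles
 * the region number across the word (in the style of Knuth's
 * multiplicative method), and the >> 12 brings better-mixed high bits
 * down before the bucket mask is applied.
 */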
static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}
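/*
 * __rh_alloc() is entered with the read side of hash_lock held.  It
 * drops the lock to allocate (the allocation may block), then retakes
 * it in write mode to insert.  Because another thread may have inserted
 * the same region in that window, the lookup is repeated under the
 * write lock and the loser frees its copy.
 */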
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}
static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	if (!list_empty(&recovered))
		rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for the
		 * next action.  At this point, the region is not yet
		 * connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * the clean list.  The hash entry for RH_NOSYNC will remain
		 * in memory until the region is recovered or the map is
		 * reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake();
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}
/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake();
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake();
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
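	/*
	 * The target length need not be a multiple of region_size, so
	 * the final region can be short.  ti->len & (region_size - 1)
	 * is ti->len % region_size (region_size is a power of two);
	 * e.g. ti->len = 1000 with region_size = 64 leaves a 40-sector
	 * tail, and a zero remainder means a full-sized final region.
	 */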
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 0))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);
	dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
			 bio->bi_io_vec + bio->bi_idx,
			 write_callback, bio);
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
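	/*
	 * Flush the dirty log before issuing the writes: the regions just
	 * marked dirty must reach stable log storage before the data they
	 * cover is allowed to differ between mirrors.
	 */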
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static LIST_HEAD(_mirror_sets);
static DECLARE_RWSEM(_mirror_sets_lock);

static void do_mirror(struct mirror_set *ms)
{
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

static void do_work(struct work_struct *ignored)
{
	struct mirror_set *ms;

	down_read(&_mirror_sets_lock);
	list_for_each_entry (ms, &_mirror_sets, list)
		do_mirror(ms);
	up_read(&_mirror_sets_lock);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	rh_exit(&ms->rh);
	kfree(ms);
}
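/*
 * _check_region_size() accepts a region size only if it is a multiple
 * of the page size expressed in sectors (PAGE_SIZE >> 9, i.e. 8 with
 * 4 KiB pages), a power of two (so the shift-based conversions above
 * work), and no larger than the target itself.
 */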
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

static int add_mirror_set(struct mirror_set *ms)
{
	down_write(&_mirror_sets_lock);
	list_add_tail(&ms->list, &_mirror_sets);
	up_write(&_mirror_sets_lock);
	wake();

	return 0;
}

static void del_mirror_set(struct mirror_set *ms)
{
	down_write(&_mirror_sets_lock);
	list_del(&ms->list);
	up_write(&_mirror_sets_lock);
}
/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 */
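/*
 * For illustration, a plausible dmsetup table line for a two-way mirror
 * over a 1 GiB (2097152-sector) target, using a core log with a
 * 1024-sector region size, might look like:
 *
 *	0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0
 *
 * (the device paths and sizes here are examples only).
 */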
#define DM_IO_PAGES 64

static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc != nr_mirrors * 2) {
		ti->error = "Wrong number of mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	add_mirror_set(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_mirror_set(ms);
	kcopyd_client_destroy(ms->kcopyd_client);
	free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake();
}
/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 2},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};
static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	_kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!_kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		dm_dirty_log_exit();
		return -ENOMEM;
	}
	INIT_WORK(&_kmirrord_work, do_work);

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
		destroy_workqueue(_kmirrord_wq);
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	destroy_workqueue(_kmirrord_wq);
	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");