/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_RAID1_HANDLE_ERRORS 0x01

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
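
/*
 * Example of the shift arithmetic above: with a region_size of
 * 1024 sectors, region_shift is ffs(1024) - 1 = 10, so a bio at
 * sector 5000 (relative to the start of the target) lands in
 * region 5000 >> 10 = 4, and region 4 starts back at sector
 * 4 << 10 = 4096.
 */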
static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;
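
	/*
	 * The loop above leaves us with roughly one bucket per 64
	 * regions, rounded down to a power of two, with a floor of
	 * 64 buckets: e.g. 16384 regions gives max_buckets = 256
	 * and a final nr_buckets of 128.
	 */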
	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}
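
/*
 * Multiplicative hash: multiplying by a large odd constant (in
 * the spirit of Knuth's multiplicative hashing) scrambles the
 * key, the shift discards the low-order bits (regions tend to be
 * accessed sequentially), and the mask reduces the result to a
 * table index.
 */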
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}
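
/*
 * Called with the read lock held.  We can't allocate while
 * holding it, so drop the lock, allocate, then retake it in
 * write mode to insert; if another cpu inserted the same region
 * in the meantime we lost the race and free our copy.  The
 * caller still sees the read lock held on return.
 */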
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}
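
/*
 * Recovery of a region has finished: record the outcome in the
 * dirty log, release any writes that were delayed against the
 * region, and give back a recovery slot.
 */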
static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	if (!list_empty(&recovered))
		rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}
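
/*
 * A write is about to be issued to this region: make sure it is
 * marked dirty in the log before any data hits the disks, and
 * bump the pending count so rh_dec() knows when the region has
 * quiesced.
 */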
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list
		 * for the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be
		 * kept off the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 0))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}
/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}
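
/*
 * Issue the write to every mirror leg as a single asynchronous
 * dm-io request; write_callback() runs once when all legs have
 * completed, with one error bit per mirror.
 */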
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);
	dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
			 bio->bi_io_vec + bio->bi_idx,
			 write_callback, bio);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
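/*
 * The order below matters: rh_update_states() retires clean and
 * recovered regions (requeueing any writes that were delayed on
 * them) before do_recovery() quiesces new regions and the fresh
 * reads and writes are dispatched.
 */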
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	rh_exit(&ms->rh);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
#define DM_IO_PAGES 64
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
		return -ENOMEM;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	argv += args_used;
	argc -= args_used;

	if (argc) {
		ti->error = "Too many mirror arguments";
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return -EINVAL;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	wake(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
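
/*
 * Queue a bio for kmirrord.  The daemon only needs waking when
 * the list goes from empty to non-empty; anything already queued
 * will be picked up by the pending work.
 */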
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu",
			(unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
			(unsigned long long)ms->nr_regions);

		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
				(unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");