dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};
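
/*
 * Worker wake-up helpers.
 *
 * kmirrord (do_mirror below) does the real work.  wakeup_mirrord() queues
 * that work immediately; delayed_wake() arms a one-shot timer so the wakeup
 * happens roughly HZ/5 later, and the timer_pending bit prevents the timer
 * from being armed twice.
 */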
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}
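
/*
 * A read_record remembers which mirror a read was sent to and enough of the
 * original bio (via dm_bio_details) to restore and requeue it if the read
 * fails, so mirror_end_io() can retry on another in-sync leg.
 */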
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror struct away inside
 * bi_next for read/write buffers. This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}
/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device. If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event. Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
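
/*
 * Kick off a kcopyd copy for one region: the source is the default mirror,
 * the destinations are every other leg.  When errors are not being handled,
 * kcopyd is told to ignore write errors.  recovery_complete() above marks
 * the region recovered (or not) once the copy finishes.
 */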
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
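/*
 * choose_mirror() scans backwards from the current default mirror, wrapping
 * around, and returns the first leg with no recorded errors (NULL if every
 * leg has failed).  The sector argument is currently unused.
 */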
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
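/*
 * read_callback() runs in dm-io completion context.  On error it marks the
 * mirror failed and, if another usable leg exists, pushes the bio back onto
 * the read queue for kmirrord to retry; otherwise the bio is completed
 * with -EIO.
 */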
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
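/*
 * write_callback() runs once dm-io has completed the write on every leg.
 * If at least one leg succeeded and errors are being handled, the bio is
 * put on the failures list so kmirrord can mark the region out-of-sync and
 * raise an event from process context; if no leg succeeded the bio fails
 * with -EIO.
 */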
static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event. Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
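
/*
 * do_writes() sorts queued writes by the state of the region they touch:
 * in-sync regions go through dm-io to every leg, regions still being
 * recovered are delayed via the region hash, and out-of-sync regions are
 * written to the default mirror only.  Writes hitting a region a remote
 * node is recovering are requeued with a short delay, and if the log flush
 * fails the in-sync writes are diverted to the failures list instead.
 */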
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list. We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core. This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes. If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices. It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
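/*
 * do_mirror() is the kmirrord work function: it snapshots and empties the
 * read, write and failure lists under the lock, lets the region hash update
 * its state, then processes recovery, reads, writes and failures in that
 * order.
 */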
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}
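
/*
 * Parse one "mirror_path offset" argument pair and take a reference on the
 * underlying device.
 */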
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region. If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
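/*
 * Writes are always queued to kmirrord so region state and the pending
 * count can be managed there.  Reads from in-sync regions are remapped
 * directly to a healthy mirror, remembering enough state (read_record) to
 * retry elsewhere if the read later fails; reads that cannot be served
 * immediately are queued, and readahead is failed with -EWOULDBLOCK.
 */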
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
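
/*
 * mirror_end_io() drops the region's pending count for writes.  For a
 * failed read it restores the original bio from the read_record and
 * requeues it (returning 1 so the core does not complete the bio yet), as
 * long as another usable mirror remains.
 */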
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
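
/*
 * STATUSTYPE_INFO reports per-leg health characters and the sync count;
 * STATUSTYPE_TABLE re-emits the constructor arguments.
 */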
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);
		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name = "mirror",
	.version = {1, 12, 0},
	.module = THIS_MODULE,
	.ctr = mirror_ctr,
	.dtr = mirror_dtr,
	.map = mirror_map,
	.end_io = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume = mirror_resume,
	.status = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");