dm-raid1.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>
#include <linux/hardirq.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'bhs_delayed' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;

struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;	/* bitmap of enum dm_raid1_error; must be long for test_and_set_bit() */
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};
/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
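/*
 * Initialise a region hash: size the bucket array from the region
 * count (a power of two, at least 64 buckets), create the mempool
 * used for struct region allocation, and set up the recovery lists
 * and locks described above.
 */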
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}
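/*
 * Allocate and insert a new region.  Called with the hash read lock
 * held: we drop it while allocating, retake it in write mode and
 * re-check for a racing insertion before adding the new entry, then
 * return with the read lock held again.
 */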
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}
static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}
static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}
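/*
 * Record the region's final sync state in the log, requeue any writes
 * that were delayed while it was recovering and release one recovery
 * slot on the recovery_count semaphore.
 */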
static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}
static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice(&rh->failed_recovered_regions, &failed_recovered);
		INIT_LIST_HEAD(&rh->failed_recovered_regions);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
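/*
 * Take a pending-I/O reference on a region before a write is issued.
 * A clean region becomes dirty here and is marked in the dirty log.
 */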
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}
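/*
 * Drop the pending-I/O count for a region.  This runs from the write
 * end_io path, which may be in interrupt context, hence the irqsave
 * locking.  When the last pending write completes the region is moved
 * to the list that matches its state and kmirrord is woken.
 */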
static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region onto the corresponding list for
		 * the next action.  At this point, the region is not yet
		 * connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}
/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	if (!errors_handled(ms))
		return;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *)context;
	struct mirror_set *ms = reg->rh->ms;
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		/* copy into an unsigned long so test_bit() gets the type it expects */
		unsigned long err_bits = write_err;

		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &err_bits))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	rh_recovery_end(reg, !(read_err || write_err));
}
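/*
 * Kick off a kcopyd copy for one region: the source is the default
 * mirror, the destinations are every other mirror.  recovery_complete()
 * is called when the copy finishes.
 */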
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return get_default_mirror(ms);
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}
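/*
 * Dispatch queued reads.  A read may only be balanced across mirrors
 * when its region is in sync; otherwise it must go to the default
 * mirror, which holds the authoritative data.
 */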
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = get_default_mirror(ms);

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}
/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

/* __bio_mark_nosync
 * @ms
 * @bio
 * @done
 * @error
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
static void __bio_mark_nosync(struct mirror_set *ms,
			      struct bio *bio, unsigned done, int error)
{
	unsigned long flags;
	struct region_hash *rh = &ms->rh;
	struct dirty_log *log = ms->rh.log;
	struct region *reg;
	region_t region = bio_to_region(rh, bio);
	int recovering = 0;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);
	ms->in_sync = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) RH_DIRTY
	 *   2) RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) RH_RECOVERING: flushing pending writes
	 * In any of these cases, the region should not be connected
	 * to any list.
	 */
	recovering = (reg->state == RH_RECOVERING);
	reg->state = RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	bio_endio(bio, error);
	if (recovering)
		complete_resync_work(reg, 0);
}
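/*
 * Completion callback for the dm-io write issued by do_write().  This
 * can run from interrupt context, so failed bios that need error
 * handling are queued on ms->failures and handed to the main kmirrord
 * thread rather than being processed here.
 */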
static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	int ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wake(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}
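/*
 * Issue a single dm-io request that writes the bio's data to every
 * mirror.  write_callback() runs once all the legs have completed,
 * with a bitmap of any per-mirror errors.
 */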
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure))
		while ((bio = bio_list_pop(&sync)))
			bio_endio(bio, -EIO);
	else while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	while ((bio = bio_list_pop(failures)))
		__bio_mark_nosync(ms, bio, bio->bi_size, 0);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}
/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static int _do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	return (ms->failures.head) ? 1 : 0;
}

static void do_mirror(struct work_struct *work)
{
	/*
	 * If _do_mirror returns 1, we give it
	 * another shot.  This helps for cases like
	 * 'suspend' where we call flush_workqueue
	 * and expect all work to be finished.  If
	 * a failure happens during a suspend, we
	 * couldn't issue a 'wake' because it would
	 * not be honored.  Therefore, we return '1'
	 * from _do_mirror, and retry here.
	 */
	while (_do_mirror(work))
		schedule();
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
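/*
 * Allocate and initialise a mirror_set, including the trailing array
 * of nr_mirrors struct mirror entries, the dm-io client and the
 * region hash.
 */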
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}
static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}
/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wake(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
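/*
 * Add a bio to the read or write list for kmirrord and wake the
 * daemon if the list was previously empty.
 */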
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}
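/*
 * Stop recovery and wait for any in-flight recovery I/O to drain
 * before asking the dirty log to suspend.
 */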
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
			(unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
			(unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
				(unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
static struct target_type mirror_target = {
	.name = "mirror",
	.version = {1, 0, 3},
	.module = THIS_MODULE,
	.ctr = mirror_ctr,
	.dtr = mirror_dtr,
	.map = mirror_map,
	.end_io = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume = mirror_resume,
	.status = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");