dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
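
/*
 * Woken when the last in-flight recovery completes (see
 * complete_resync_work() and rh_recovery_prepare()), so that
 * mirror_postsuspend() can wait for recovery io to drain.
 */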
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}
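
/*
 * Multiplicative hash of the region key into one of the
 * nr_buckets (a power of two) bucket lists.
 */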
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}
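
/*
 * Requeue the bios that were delayed while their region was
 * recovering.
 */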
static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}
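
/*
 * Pull the clean and recovered lists out of the hash under the
 * locks, then update the dirty log and free the regions with no
 * locks held.
 */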
static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}
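
/*
 * A write is about to be issued to this region: bump its pending
 * count and, if it was clean, mark it dirty in the log.
 */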
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}
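
/*
 * Push recovery along: quiesce more regions and hand any that are
 * already quiesced to kcopyd.
 */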
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS + 1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
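
/*
 * Classify the queued writes by region state, then dispatch them:
 * sync writes go to all mirrors, nosync writes only to the default
 * mirror, and writes to recovering regions are delayed.
 */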
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
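/*
 * The work function run on the kmirrord workqueue: it processes
 * region state updates, recovery and the queued reads and writes.
 */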
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
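/*
 * Allocate and initialise a mirror_set, including its dm_io client
 * and region hash; the mirror devices themselves are filled in
 * later by get_mirror().
 */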
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}
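
/*
 * Parse one "mirror_path offset" pair from the table line and take
 * a reference on the device.
 */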
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
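/*
 * An example table line might look like this (hypothetical devices):
 *
 *   0 819200 mirror core 2 64 nosync 2 /dev/sdb 0 /dev/sdc 0
 *
 * i.e. a 2-way mirror of /dev/sdb and /dev/sdc using a core log
 * with a region size of 64 sectors.
 */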
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
		return -ENOMEM;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	argv += args_used;
	argc -= args_used;

	if (argc) {
		ti->error = "Too many mirror arguments";
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return -EINVAL;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	wake(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
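
/*
 * Add a bio to one of the daemon's queues, waking kmirrord if the
 * queue was previously empty.
 */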
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}
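
/*
 * Stop recovery and wait for any recovery io we generated to
 * complete before the log is suspended.
 */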
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		/* append the log's status after what we've emitted so far */
		sz += ms->rh.log->type->status(ms->rh.log, type,
					       result + sz, maxlen - sz);
		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");