dm-snap.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
        struct dm_snap_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /*
         * Short-term queue of pending exceptions prior to submission.
         */
        struct list_head list;

        /*
         * The primary pending_exception is the one that holds
         * the ref_count and the list of origin_bios for a
         * group of pending_exceptions.  It is always last to get freed.
         * These fields get set up when writing to the origin.
         */
        struct dm_snap_pending_exception *primary_pe;

        /*
         * Number of pending_exceptions processing this chunk.
         * When this drops to zero we must complete the origin bios.
         * If incrementing or decrementing this, hold pe->snap->lock for
         * the sibling concerned and not pe->primary_pe->snap->lock
         * unless they are the same.
         */
        atomic_t ref_count;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK 0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);

        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}
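
/*
 * Hash an origin device into _origins[].  Only the low eight bits of
 * the dev_t are used, matching ORIGIN_HASH_SIZE (256 buckets).
 */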
static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o;
        struct block_device *bdev = snap->origin->bdev;

        down_write(&_origins_lock);
        o = __lookup_origin(bdev);

        if (!o) {
                /* New origin */
                o = kmalloc(sizeof(*o), GFP_KERNEL);
                if (!o) {
                        up_write(&_origins_lock);
                        return -ENOMEM;
                }

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        list_add_tail(&snap->list, &o->snapshots);

        up_write(&_origins_lock);
        return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
        unsigned int i;

        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_snap_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
        return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
                             struct dm_snap_exception *e)
{
        struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
        list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
                                                  chunk_t chunk)
{
        struct list_head *slot;
        struct dm_snap_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (e->old_chunk == chunk)
                        return e;

        return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
        struct dm_snap_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
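        /*
         * Under memory pressure GFP_NOIO may fail; fall back to
         * GFP_ATOMIC, which may dip into the emergency reserves.
         */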
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_exception(struct dm_snap_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
        return mempool_alloc(pending_pool, GFP_NOIO);
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
        struct dm_snap_exception *e;

        e = alloc_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;
        e->new_chunk = new;
        insert_exception(&s->complete, e);

        return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Rounds a number down to a power of 2.
 */
static uint32_t round_down(uint32_t n)
{
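        /* Clearing the lowest set bit repeatedly leaves only the highest one. */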
        while (n & (n - 1))
                n &= (n - 1);

        return n;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        /* Round it down to a power of 2 */
        hash_size = round_down(hash_size);
        if (init_exception_table(&s->complete, hash_size))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (init_exception_table(&s->pending, hash_size)) {
                exit_exception_table(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
        size--;
        return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
                          char **error)
{
        unsigned long chunk_size;
        char *value;

        chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
        if (*chunk_size_arg == '\0' || *value != '\0') {
                *error = "Invalid chunk size";
                return -EINVAL;
        }

        if (!chunk_size) {
                s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
                return 0;
        }

        /*
         * Chunk size must be multiple of page size.  Silently
         * round up if it's not.
         */
        chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

        /* Check chunk_size is a power of 2 */
        if (!is_power_of_2(chunk_size)) {
                *error = "Chunk size is not a power of 2";
                return -EINVAL;
        }

        /* Validate the chunk size against the device block size */
        if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
                *error = "Chunk size is not a multiple of device blocksize";
                return -EINVAL;
        }

        s->chunk_size = chunk_size;
        s->chunk_mask = chunk_size - 1;
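        /* chunk_size is a power of 2 here, so ffs() - 1 is its log2 */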
        s->chunk_shift = ffs(chunk_size) - 1;

        return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int r = -EINVAL;
        char persistent;
        char *origin_path;
        char *cow_path;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad1;
        }

        origin_path = argv[0];
        cow_path = argv[1];
        persistent = toupper(*argv[2]);

        if (persistent != 'P' && persistent != 'N') {
                ti->error = "Persistent flag is not P or N";
                r = -EINVAL;
                goto bad1;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (s == NULL) {
                ti->error = "Cannot allocate snapshot context private "
                            "structure";
                r = -ENOMEM;
                goto bad1;
        }

        r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad2;
        }

        r = dm_get_device(ti, cow_path, 0, 0,
                          FMODE_READ | FMODE_WRITE, &s->cow);
        if (r) {
                dm_put_device(ti, s->origin);
                ti->error = "Cannot get COW device";
                goto bad2;
        }

        r = set_chunk_size(s, argv[3], &ti->error);
        if (r)
                goto bad3;

        s->type = persistent;

        s->valid = 1;
        s->active = 0;
        s->last_percent = 0;
        init_rwsem(&s->lock);
        spin_lock_init(&s->pe_lock);
        s->table = ti->table;

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad3;
        }

        s->store.snap = s;

        if (persistent == 'P')
                r = dm_create_persistent(&s->store);
        else
                r = dm_create_transient(&s->store);

        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad4;
        }

        r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad5;
        }

        /* Metadata must only be loaded into one table at once */
        r = s->store.read_metadata(&s->store);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad6;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        bio_list_init(&s->queued_bios);
        INIT_WORK(&s->queued_bios_work, flush_queued_bios);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        if (register_snapshot(s)) {
                r = -EINVAL;
                ti->error = "Cannot register snapshot origin";
                goto bad6;
        }

        ti->private = s;
        ti->split_io = s->chunk_size;

        return 0;

 bad6:
        kcopyd_client_destroy(s->kcopyd_client);

 bad5:
        s->store.destroy(&s->store);

 bad4:
        exit_exception_table(&s->pending, pending_cache);
        exit_exception_table(&s->complete, exception_cache);

 bad3:
        dm_put_device(ti, s->cow);
        dm_put_device(ti, s->origin);

 bad2:
        kfree(s);

 bad1:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        exit_exception_table(&s->pending, pending_cache);
        exit_exception_table(&s->complete, exception_cache);

        s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        flush_workqueue(ksnapd);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        __free_exceptions(s);

        dm_put_device(ti, s->origin);
        dm_put_device(ti, s->cow);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static void flush_queued_bios(struct work_struct *work)
{
        struct dm_snapshot *s =
                container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;

        spin_lock_irqsave(&s->pe_lock, flags);
        queued_bios = bio_list_get(&s->queued_bios);
        spin_unlock_irqrestore(&s->pe_lock, flags);

        flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store.drop_snapshot)
                s->store.drop_snapshot(&s->store);

        s->valid = 0;

        dm_table_event(s->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
        atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snap_pending_exception *primary_pe;
        struct bio *origin_bios = NULL;

        primary_pe = pe->primary_pe;

        /*
         * If this pe is involved in a write to the origin and
         * it is the last sibling to complete then release
         * the bios for the original write to the origin.
         */
        if (primary_pe &&
            atomic_dec_and_test(&primary_pe->ref_count))
                origin_bios = bio_list_get(&primary_pe->origin_bios);

        /*
         * Free the pe if it's not linked to an origin write or if
         * it's not itself a primary pe.
         */
        if (!primary_pe || primary_pe != pe)
                free_pending_exception(pe);

        /*
         * Free the primary pe if nothing references it.
         */
        if (primary_pe && !atomic_read(&primary_pe->ref_count))
                free_pending_exception(primary_pe);

        return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_snap_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_exception(e);
                error = 1;
                goto out;
        }

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        insert_exception(&s->complete, e);

 out:
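        /* Every path to this label arrives with s->lock held for write. */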
        remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = put_pending_exception(pe);

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);
        else
                /* Update the metadata if we are persistent */
                s->store.commit_exception(&s->store, &pe->e, commit_callback,
                                          pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s, pe->e.old_chunk);
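        /* The last chunk of the origin may be partial: don't copy past the end. */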
        src.count = min(s->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        kcopyd_copy(s->kcopyd_client,
                    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_exception *e;
        struct dm_snap_pending_exception *pe;
        chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

        /*
         * Is there a pending exception for this already ?
         */
        e = lookup_exception(&s->pending, chunk);
        if (e) {
                /* cast the exception to a pending exception */
                pe = container_of(e, struct dm_snap_pending_exception, e);
                goto out;
        }

        /*
         * Create a new pending exception, we don't want
         * to hold the lock while we do this.
         */
        up_write(&s->lock);
        pe = alloc_pending_exception();
        down_write(&s->lock);

        if (!s->valid) {
                free_pending_exception(pe);
                return NULL;
        }
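        /*
         * The lock was dropped around the allocation, so another thread
         * may have created the pending exception first: look it up again.
         */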
        e = lookup_exception(&s->pending, chunk);
        if (e) {
                free_pending_exception(pe);
                pe = container_of(e, struct dm_snap_pending_exception, e);
                goto out;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->primary_pe = NULL;
        atomic_set(&pe->ref_count, 0);
        pe->snap = s;
        pe->started = 0;

        if (s->store.prepare_exception(&s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        get_pending_exception(pe);
        insert_exception(&s->pending, &pe->e);

 out:
        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
                            struct bio *bio)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
                (bio->bi_sector & s->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
                        union map_info *map_context)
{
        struct dm_snap_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        chunk = sector_to_chunk(s, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
        if (bio_rw(bio) == WRITE) {
                pe = __find_pending_exception(s, bio);
                if (!pe) {
                        __invalidate_snapshot(s, -ENOMEM);
                        r = -EIO;
                        goto out_unlock;
                }

                remap_exception(s, &pe->e, bio);
                bio_list_add(&pe->snapshot_bios, bio);

                r = DM_MAPIO_SUBMITTED;

                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
                        up_write(&s->lock);
                        start_copy(pe);
                        goto out;
                }
        } else
                /*
                 * FIXME: this read path scares me because we
                 * always use the origin when we have a pending
                 * exception.  However I can't think of a
                 * situation where this is wrong - ejt.
                 */
                bio->bi_bdev = s->origin->bdev;

 out_unlock:
        up_write(&s->lock);
 out:
        return r;
}

static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        down_write(&s->lock);
        s->active = 1;
        up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
{
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                if (!snap->valid)
                        snprintf(result, maxlen, "Invalid");
                else {
                        if (snap->store.fraction_full) {
                                sector_t numerator, denominator;
                                snap->store.fraction_full(&snap->store,
                                                          &numerator,
                                                          &denominator);
                                snprintf(result, maxlen, "%llu/%llu",
                                         (unsigned long long)numerator,
                                         (unsigned long long)denominator);
                        }
                        else
                                snprintf(result, maxlen, "Unknown");
                }
                break;

        case STATUSTYPE_TABLE:
                /*
                 * kdevname returns a static pointer so we need
                 * to make private copies if the output is to
                 * make sense.
                 */
                snprintf(result, maxlen, "%s %s %c %llu",
                         snap->origin->name, snap->cow->name,
                         snap->type,
                         (unsigned long long)snap->chunk_size);
                break;
        }

        return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED, first = 0;
        struct dm_snapshot *snap;
        struct dm_snap_exception *e;
        struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
        chunk_t chunk;
        LIST_HEAD(pe_queue);
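
        /*
         * A write to the origin may need a copy-on-write exception in
         * every snapshot of that origin.  One pending exception becomes
         * the primary_pe; it holds the origin bio until all of its
         * siblings have finished copying their chunks.
         */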
        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (bio->bi_sector >= dm_table_get_size(snap->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap, bio->bi_sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 *
                 * ref_count is initialised to 1 so pending_complete()
                 * won't destroy the primary_pe while we're inside this loop.
                 */
                e = lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __find_pending_exception(snap, bio);
                if (!pe) {
                        __invalidate_snapshot(snap, -ENOMEM);
                        goto next_snapshot;
                }

                if (!primary_pe) {
                        /*
                         * Either every pe here has same
                         * primary_pe or none has one yet.
                         */
                        if (pe->primary_pe)
                                primary_pe = pe->primary_pe;
                        else {
                                primary_pe = pe;
                                first = 1;
                        }

                        bio_list_add(&primary_pe->origin_bios, bio);

                        r = DM_MAPIO_SUBMITTED;
                }

                if (!pe->primary_pe) {
                        pe->primary_pe = primary_pe;
                        get_pending_exception(primary_pe);
                }

                if (!pe->started) {
                        pe->started = 1;
                        list_add_tail(&pe->list, &pe_queue);
                }

 next_snapshot:
                up_write(&snap->lock);
        }

        if (!primary_pe)
                return r;

        /*
         * If this is the first time we're processing this chunk and
         * ref_count is now 1 it means all the pending exceptions
         * got completed while we were in the loop above, so it falls to
         * us here to remove the primary_pe and submit any origin_bios.
         */
        if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
                flush_bios(bio_list_get(&primary_pe->origin_bios));
                free_pending_exception(primary_pe);
                /* If we got here, pe_queue is necessarily empty. */
                return r;
        }

        /*
         * Now that we have a complete pe list we can start the copying.
         */
        list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
                start_copy(pe);

        return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio);
        up_read(&_origins_lock);

        return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], 0, ti->len,
                          dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
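
/* Pick the smaller of two values, treating zero as "unset". */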
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        struct dm_snapshot *snap;
        struct origin *o;
        chunk_t chunk_size = 0;

        down_read(&_origins_lock);
        o = __lookup_origin(dev->bdev);
        if (o)
                list_for_each_entry (snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size, snap->chunk_size);
        up_read(&_origins_lock);

        ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
                         unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }

        return 0;
}

static struct target_type origin_target = {
        .name = "snapshot-origin",
        .version = {1, 5, 0},
        .module = THIS_MODULE,
        .ctr = origin_ctr,
        .dtr = origin_dtr,
        .map = origin_map,
        .resume = origin_resume,
        .status = origin_status,
};

static struct target_type snapshot_target = {
        .name = "snapshot",
        .version = {1, 5, 0},
        .module = THIS_MODULE,
        .ctr = snapshot_ctr,
        .dtr = snapshot_dtr,
        .map = snapshot_map,
        .resume = snapshot_resume,
        .status = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_register_target(&snapshot_target);
        if (r) {
                DMERR("snapshot target register failed %d", r);
                return r;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad1;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad2;
        }

        exception_cache = KMEM_CACHE(dm_snap_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad3;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad4;
        }

        pending_pool = mempool_create_slab_pool(128, pending_cache);
        if (!pending_pool) {
                DMERR("Couldn't create pending pool.");
                r = -ENOMEM;
                goto bad5;
        }

        ksnapd = create_singlethread_workqueue("ksnapd");
        if (!ksnapd) {
                DMERR("Failed to create ksnapd workqueue.");
                r = -ENOMEM;
                goto bad6;
        }

        return 0;

 bad6:
        mempool_destroy(pending_pool);
 bad5:
        kmem_cache_destroy(pending_cache);
 bad4:
        kmem_cache_destroy(exception_cache);
 bad3:
        exit_origin_hash();
 bad2:
        dm_unregister_target(&origin_target);
 bad1:
        dm_unregister_target(&snapshot_target);

        return r;
}

static void __exit dm_snapshot_exit(void)
{
        int r;

        destroy_workqueue(ksnapd);

        r = dm_unregister_target(&snapshot_target);
        if (r)
                DMERR("snapshot unregister failed %d", r);

        r = dm_unregister_target(&origin_target);
        if (r)
                DMERR("origin unregister failed %d", r);

        exit_origin_hash();
        mempool_destroy(pending_pool);
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");