dm-snap.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
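/*
 * Summary of a pending exception's lifecycle (as implemented below): it is
 * allocated in snapshot_map()/__origin_write() when a chunk is written for
 * the first time, inserted into s->pending, handed to kcopyd via
 * start_copy(), and finally converted into a completed exception (or freed)
 * in pending_complete().
 */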
struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
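/*
 * Reads that snapshot_map() redirects to the origin are recorded here for
 * the duration of the I/O.  pending_complete() polls __chunk_is_tracked()
 * so that a chunk is not remapped while such a read is still in flight.
 */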
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
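/*
 * pending_exceptions_count counts every live pending exception for the
 * snapshot; snapshot_dtr() spins on it (with msleep) before tearing down the
 * mempools, which is why free_pending_exception() decrements it only after
 * returning the structure to the pool.
 */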
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
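/*
 * insert_completed_exception() keeps each hash bucket ordered by old_chunk
 * and merges the new entry into a neighbour when both the old and new chunk
 * numbers are consecutive, bumping dm_consecutive_chunk_count() instead of
 * storing a separate exception.
 */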
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}
/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);
	hash_size = rounddown_pow_of_two(hash_size);

	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
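/*
 * Parse and validate the chunk size (given in 512-byte sectors).  A value of
 * zero leaves chunk_size/chunk_mask/chunk_shift cleared; otherwise it is
 * silently rounded up to a page multiple and must be a power of two and a
 * multiple of the device block size.
 */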
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
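/*
 * A hypothetical example table line (device names and sizes made up):
 *
 *   echo "0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16" | \
 *       dmsetup create snap0
 *
 * This maps a 1 GiB (2097152-sector) snapshot of /dev/vg0/base with a
 * persistent exception store on /dev/vg0/base-cow and a 16-sector (8 KiB)
 * chunk size.
 */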
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	r = dm_exception_store_create(argv[2], ti, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	s->store->snap = s;

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad5:
	s->store->type->dtr(s->store);

bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

bad2:
	kfree(s);

bad1:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store->type->dtr(s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->store->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
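/*
 * Completion path: kcopyd calls copy_callback(), which (on success) asks the
 * exception store to commit the metadata; commit_callback() then invokes
 * pending_complete(), below, which moves the exception from the pending to
 * the complete table and releases any queued bios.
 */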
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}
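/*
 * Redirect a bio into the COW device: the target chunk is the exception's
 * new_chunk plus the offset within a consecutive run, and the offset within
 * the chunk is preserved via (bi_sector & chunk_mask).
 */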
static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector & s->chunk_mask);
}
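/*
 * snapshot_map(): reads of already-remapped chunks are redirected to the COW
 * device, other reads go to the origin (and are tracked); a write to a chunk
 * that has no exception yet queues the bio on the pending exception and
 * kicks off the copy, returning DM_MAPIO_SUBMITTED.
 */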
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store->type->fraction_full) {
				sector_t numerator, denominator;
				snap->store->type->fraction_full(snap->store,
								 &numerator,
								 &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %s %llu",
			 snap->origin->name, snap->cow->name,
			 snap->store->type->name,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
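/*
 * A write to the origin must trigger an exception in every valid, active
 * snapshot of that origin.  The bio is parked on primary_pe->origin_bios and
 * is only released (via put_pending_exception()) once the last sibling
 * pending exception has completed its copy.
 */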
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");