dm-snap.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
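/*
 * Exception hash table.  A chunk's bucket is (chunk >> hash_shift) &
 * hash_mask, so runs of consecutive chunks land in the same bucket;
 * see exception_hash() below.
 */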
struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;
	struct dm_dev *origin;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Used for display of table */
	char type;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
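/*
 * A pending exception represents a chunk whose copy from the origin to
 * the COW device is still in flight.  Bios that must wait for the copy
 * are parked on the lists below and resubmitted by pending_complete().
 */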
struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock
	 * unless they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
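/*
 * Reads of not-yet-remapped chunks are tracked in a small hash so that
 * pending_complete() can wait, via __chunk_is_tracked(), for conflicting
 * reads to drain before publishing a completed exception.
 */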
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}
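/*
 * Origins are hashed by the low bits of their device number;
 * ORIGIN_MASK is ORIGIN_HASH_SIZE - 1.
 */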
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
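/*
 * pending_exceptions_count lets snapshot_dtr() wait until every pending
 * exception allocated from this pool has been freed before tearing the
 * snapshot down.
 */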
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
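/*
 * Completed exceptions are kept sorted by old_chunk within each hash
 * bucket so that a new exception adjacent to an existing run of
 * consecutive chunks can simply bump that entry's consecutive count
 * instead of being inserted on its own.
 */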
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->store->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);
	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path;
	struct dm_exception_store *store;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad_args;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_args;
	}

	argv += args_used;
	argc -= args_used;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad_snap;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->store = store;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->store->chunk_size;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad_snap:
	dm_exception_store_destroy(store);

bad_args:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->store->ti->table);
}
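/*
 * ref_count on the primary pending exception counts the siblings (one
 * per snapshot of the same origin) still copying this chunk; when the
 * last reference is dropped the parked origin bios are released.
 */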
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min(s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->store->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->store->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector &
					  s->store->chunk_mask);
}
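/*
 * alloc_pending_exception() can block in mempool_alloc(), so snapshot_map()
 * drops s->lock around the allocation and must then re-check both s->valid
 * and the complete table before using the new pending exception.
 */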
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->fraction_full) {
				sector_t numerator, denominator;
				snap->store->type->fraction_full(snap->store,
								 &numerator,
								 &denominator);
				DMEMIT("%llu/%llu",
				       (unsigned long long)numerator,
				       (unsigned long long)denominator);
			} else
				DMEMIT("Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s", snap->origin->name);
		DMEMIT(" %s %s %llu", snap->store->cow->name,
		       snap->store->type->name,
		       (unsigned long long)snap->store->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
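/*
 * A write to the origin must trigger a copy-out in every valid, active
 * snapshot of that origin.  The first pending exception found in the
 * loop below becomes (or supplies) the primary_pe that holds the origin
 * bio until all the sibling copies have completed.
 */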
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
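/* Pick the smaller of two chunk sizes, treating zero as "unset". */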
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};
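/*
 * Module init sets things up in order: exception stores, targets,
 * origin hash, slab caches, workqueue.  The bad_* labels below unwind
 * in the reverse order.
 */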
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");