/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"
/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock
	 * unless they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
/*
 * Slab caches for the completed and pending exception structures.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
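
/*
 * Reads that snapshot_map() passes straight through to the origin are
 * recorded in tracked_chunk_hash above; pending_complete() polls
 * __chunk_is_tracked() so that a copied chunk is never made visible
 * while a read of the same chunk is still in flight.
 */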
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
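
/*
 * Because the low hash_shift bits are discarded, chunks that differ
 * only in those bits all land in the same bucket: with hash_shift == 4,
 * for instance, chunks 32..47 all hash to (32 >> 4) & hash_mask.  This
 * is what lets insert_completed_exception() below merge a run of
 * consecutive chunks into a single entry on one bucket's list.
 */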
static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a chunk, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
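
/*
 * Worked example of the merging above: if a bucket already holds an
 * exception with old_chunk 10, new_chunk 20 and a consecutive count of
 * 2 (i.e. chunks 10..12 map to 20..22), then a new exception 13 -> 23
 * just bumps the count to 3 and new_e is freed, while 9 -> 19 bumps the
 * count and decrements both chunk numbers.  Anything else becomes a
 * fresh entry, keeping each bucket's list ordered by old_chunk.
 */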
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
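
/*
 * That works out at 2097152 / sizeof(struct list_head) buckets --
 * 131072 on a typical 64-bit build where a list_head is two 8-byte
 * pointers; the exact figure depends on the architecture's pointer
 * size.
 */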
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);
	hash_size = rounddown_pow_of_two(hash_size);

	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
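
/*
 * e.g. round_up(9, 8) == 16 and round_up(16, 8) == 16.  set_chunk_size()
 * below uses this with size == PAGE_SIZE >> 9, i.e. 8 sectors on a
 * machine with 4KB pages.
 */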
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
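
/*
 * For example, a chunk size of 16 sectors (8KB) gives chunk_mask == 15
 * and chunk_shift == 4, so converting between sectors and chunks is a
 * shift, and the offset within a chunk is a mask of bi_sector, which
 * remap_exception() below relies on.
 */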
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}
	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		r = -ENOMEM;
		goto bad_tracked_chunk_pool;
	}
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads.  This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}
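
/*
 * src.count is clamped against dev_size because the origin need not be
 * a whole number of chunks long: for a final, partial chunk only the
 * sectors that actually exist are copied.
 */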
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}
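
/*
 * e.g. with 16-sector chunks and an exception mapping old chunks 10..12
 * to new chunks 20..22, a bio for sector 165 (chunk 10, offset 5) is
 * redirected to the COW device at sector 20 * 16 + 5 == 325.
 */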
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
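
/*
 * A note on the accounting above: the reference taken by
 * __find_pending_exception() when a pe is created is what keeps a new
 * primary_pe alive across the loop; the caller that first grouped
 * siblings under it (first == 1) drops that reference once the loop is
 * done, after which the last sibling to finish in
 * put_pending_exception() frees the primary_pe and releases the origin
 * bios.
 */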
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

 bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
 bad5:
	kmem_cache_destroy(pending_cache);
 bad4:
	kmem_cache_destroy(exception_cache);
 bad3:
	exit_origin_hash();
 bad2:
	dm_unregister_target(&origin_target);
 bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");