/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
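
/*
 * Note: "x ? : y" is the GNU conditional-with-omitted-operand extension;
 * it evaluates to x when x is non-zero, so the define above reserves
 * 1MB worth of pages, falling back to a single page if PAGE_SHIFT >= 20.
 */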

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
        struct dm_snap_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /*
         * Short-term queue of pending exceptions prior to submission.
         */
        struct list_head list;

        /*
         * The primary pending_exception is the one that holds
         * the ref_count and the list of origin_bios for a
         * group of pending_exceptions.  It is always last to get freed.
         * These fields get set up when writing to the origin.
         */
        struct dm_snap_pending_exception *primary_pe;

        /*
         * Number of pending_exceptions processing this chunk.
         * When this drops to zero we must complete the origin bios.
         * If incrementing or decrementing this, hold pe->snap->lock for
         * the sibling concerned and not pe->primary_pe->snap->lock unless
         * they are the same.
         */
        atomic_t ref_count;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}
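
/*
 * Reads remapped to the origin are tracked in the hash above so that
 * pending_complete() can wait (via __chunk_is_tracked()) until no read
 * is still in flight for a chunk before its exception is made visible.
 */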

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o;
        struct block_device *bdev = snap->origin->bdev;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);
        o = __lookup_origin(bdev);

        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        list_add_tail(&snap->list, &o->snapshots);

        up_write(&_origins_lock);
        return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
                                unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_snap_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
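
/*
 * Example: with hash_shift = DM_CHUNK_CONSECUTIVE_BITS, chunks that
 * differ only in their low hash_shift bits fall into the same bucket,
 * which is what lets insert_completed_exception() below merge runs of
 * consecutive chunks into a single exception entry.
 */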

static void insert_exception(struct exception_table *eh,
                             struct dm_snap_exception *e)
{
        struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
        list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
                                                  chunk_t chunk)
{
        struct list_head *slot;
        struct dm_snap_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
        struct dm_snap_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_exception(struct dm_snap_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}
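
/*
 * The barrier in free_pending_exception() keeps the mempool_free()
 * visible before the decrement: snapshot_dtr() spins until
 * pending_exceptions_count reaches zero and then destroys the pool,
 * pairing with the smp_mb() it issues after the wait.
 */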

static void insert_completed_exception(struct dm_snapshot *s,
                                       struct dm_snap_exception *new_e)
{
        struct exception_table *eh = &s->complete;
        struct list_head *l;
        struct dm_snap_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
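
/*
 * Worked example: if an entry maps old chunk 10 -> new chunk 20 with a
 * consecutive count of 2 (i.e. 10..12 -> 20..22), a new exception
 * 13 -> 23 matches the "insert after" test and simply bumps the count
 * to 3, while 9 -> 19 matches "insert before" and shifts the entry's
 * start down by one.
 */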

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_snap_exception *e;

        e = alloc_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        insert_completed_exception(s, e);

        return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}
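
/*
 * struct list_head is two pointers, so this caps the table at
 * 2MB / 16 = 131072 buckets on a 64-bit machine (262144 on 32-bit).
 */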

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s, chunk_t chunk_shift,
                            struct dm_dev *cow)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> chunk_shift;
        hash_size = min(hash_size, max_buckets);
        hash_size = rounddown_pow_of_two(hash_size);
        if (init_exception_table(&s->complete, hash_size,
                                 DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (init_exception_table(&s->pending, hash_size, 0)) {
                exit_exception_table(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
        size--;
        return (n + size) & ~size;
}
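
/*
 * e.g. round_up(13, 8): (13 + 7) & ~7 = 20 & ~7 = 16.  Adding size-1
 * and masking off the low bits rounds up without a division.
 */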

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
                          chunk_t *chunk_size, chunk_t *chunk_mask,
                          chunk_t *chunk_shift, struct dm_dev *cow,
                          char **error)
{
        unsigned long chunk_size_ulong;
        char *value;

        chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
        if (*chunk_size_arg == '\0' || *value != '\0') {
                *error = "Invalid chunk size";
                return -EINVAL;
        }

        if (!chunk_size_ulong) {
                *chunk_size = *chunk_mask = *chunk_shift = 0;
                return 0;
        }

        /*
         * Chunk size must be multiple of page size.  Silently
         * round up if it's not.
         */
        chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);

        /* Check chunk_size is a power of 2 */
        if (!is_power_of_2(chunk_size_ulong)) {
                *error = "Chunk size is not a power of 2";
                return -EINVAL;
        }

        /* Validate the chunk size against the device block size */
        if (chunk_size_ulong % (bdev_hardsect_size(cow->bdev) >> 9)) {
                *error = "Chunk size is not a multiple of device blocksize";
                return -EINVAL;
        }

        *chunk_size = chunk_size_ulong;
        *chunk_mask = chunk_size_ulong - 1;
        *chunk_shift = ffs(chunk_size_ulong) - 1;

        return 0;
}
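
/*
 * The chunk size is given in 512-byte sectors, so a table argument of
 * "32" means 16KB chunks; chunk_mask and chunk_shift are derived from
 * it for the sector <-> chunk conversions used throughout.
 */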

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char persistent;
        char *origin_path;
        char *cow_path;
        chunk_t chunk_size, chunk_mask, chunk_shift;
        struct dm_dev *cow;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad1;
        }

        origin_path = argv[0];
        cow_path = argv[1];
        persistent = toupper(*argv[2]);

        if (persistent != 'P' && persistent != 'N') {
                ti->error = "Persistent flag is not P or N";
                r = -EINVAL;
                goto bad1;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (s == NULL) {
                ti->error = "Cannot allocate snapshot context private "
                            "structure";
                r = -ENOMEM;
                goto bad1;
        }

        r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad2;
        }

        r = dm_get_device(ti, cow_path, 0, 0,
                          FMODE_READ | FMODE_WRITE, &cow);
        if (r) {
                dm_put_device(ti, s->origin);
                ti->error = "Cannot get COW device";
                goto bad2;
        }

        r = set_chunk_size(s, argv[3], &chunk_size, &chunk_mask, &chunk_shift,
                           cow, &ti->error);
        if (r)
                goto bad3;

        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        spin_lock_init(&s->pe_lock);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s, chunk_shift, cow)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad3;
        }

        r = dm_exception_store_create(argv[2], ti, chunk_size, chunk_mask,
                                      chunk_shift, cow, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad4;
        }

        r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad5;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                /* r still holds 0 from the kcopyd call; set a real error */
                r = -ENOMEM;
                goto bad6;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                r = -ENOMEM;
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        /* Metadata must only be loaded into one table at once */
        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_load_and_register;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        bio_list_init(&s->queued_bios);
        INIT_WORK(&s->queued_bios_work, flush_queued_bios);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        if (register_snapshot(s)) {
                r = -EINVAL;
                ti->error = "Cannot register snapshot origin";
                goto bad_load_and_register;
        }

        ti->private = s;
        ti->split_io = s->store->chunk_size;

        return 0;

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad6:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad5:
        s->store->type->dtr(s->store);

bad4:
        exit_exception_table(&s->pending, pending_cache);
        exit_exception_table(&s->complete, exception_cache);

bad3:
        dm_put_device(ti, cow);
        dm_put_device(ti, s->origin);

bad2:
        kfree(s);

bad1:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        exit_exception_table(&s->pending, pending_cache);
        exit_exception_table(&s->complete, exception_cache);

        s->store->type->dtr(s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_dev *cow = s->store->cow;

        flush_workqueue(ksnapd);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_put_device(ti, s->origin);
        dm_put_device(ti, cow);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static void flush_queued_bios(struct work_struct *work)
{
        struct dm_snapshot *s =
                container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;

        spin_lock_irqsave(&s->pe_lock, flags);
        queued_bios = bio_list_get(&s->queued_bios);
        spin_unlock_irqrestore(&s->pe_lock, flags);

        flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->store->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
        atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snap_pending_exception *primary_pe;
        struct bio *origin_bios = NULL;

        primary_pe = pe->primary_pe;

        /*
         * If this pe is involved in a write to the origin and
         * it is the last sibling to complete then release
         * the bios for the original write to the origin.
         */
        if (primary_pe &&
            atomic_dec_and_test(&primary_pe->ref_count)) {
                origin_bios = bio_list_get(&primary_pe->origin_bios);
                free_pending_exception(primary_pe);
        }

        /*
         * Free the pe if it's not linked to an origin write or if
         * it's not itself a primary pe.
         */
        if (!primary_pe || primary_pe != pe)
                free_pending_exception(pe);

        return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_snap_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_exception(e);
                error = 1;
                goto out;
        }

        /*
         * Check for conflicting reads.  This is extremely improbable,
         * so msleep(1) is sufficient and there is no need for a wait queue.
         */
        while (__chunk_is_tracked(s, pe->e.old_chunk))
                msleep(1);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        insert_completed_exception(s, e);

out:
        remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = put_pending_exception(pe);

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);
        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min(s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->store->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client,
                       &src, 1, &dest, 0, copy_callback, pe);
}
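
/*
 * The min() when computing src.count handles an origin whose size is
 * not a whole number of chunks: the final, partial chunk is copied
 * without reading past the end of the device.
 */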

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->primary_pe = NULL;
        atomic_set(&pe->ref_count, 0);
        pe->started = 0;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        get_pending_exception(pe);
        insert_exception(&s->pending, &pe->e);

        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->store->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                         (bio->bi_sector &
                          s->store->chunk_mask);
}
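
/*
 * remap_exception() redirects a bio into the COW device: the target
 * chunk is the exception's new_chunk, offset by the bio's position
 * within a consecutive run (chunk - old_chunk), plus the bio's offset
 * within the chunk (bi_sector & chunk_mask).
 */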

static int snapshot_map(struct dm_target *ti, struct bio *bio,
                        union map_info *map_context)
{
        struct dm_snap_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio, chunk);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
        if (bio_rw(bio) == WRITE) {
                pe = __lookup_pending_exception(s, chunk);
                if (!pe) {
                        up_write(&s->lock);
                        pe = alloc_pending_exception(s);
                        down_write(&s->lock);

                        if (!s->valid) {
                                free_pending_exception(pe);
                                r = -EIO;
                                goto out_unlock;
                        }

                        e = lookup_exception(&s->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                remap_exception(s, e, bio, chunk);
                                goto out_unlock;
                        }

                        pe = __find_pending_exception(s, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(s, -ENOMEM);
                                r = -EIO;
                                goto out_unlock;
                        }
                }

                remap_exception(s, &pe->e, bio, chunk);
                bio_list_add(&pe->snapshot_bios, bio);

                r = DM_MAPIO_SUBMITTED;

                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
                        up_write(&s->lock);
                        start_copy(pe);
                        goto out;
                }
        } else {
                bio->bi_bdev = s->origin->bdev;
                map_context->ptr = track_chunk(s, chunk);
        }

out_unlock:
        up_write(&s->lock);
out:
        return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
                           int error, union map_info *map_context)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snap_tracked_chunk *c = map_context->ptr;

        if (c)
                stop_tracking_chunk(s, c);

        return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        down_write(&s->lock);
        s->active = 1;
        up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
{
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                if (!snap->valid)
                        snprintf(result, maxlen, "Invalid");
                else {
                        if (snap->store->type->fraction_full) {
                                sector_t numerator, denominator;
                                snap->store->type->fraction_full(snap->store,
                                                                 &numerator,
                                                                 &denominator);
                                snprintf(result, maxlen, "%llu/%llu",
                                         (unsigned long long)numerator,
                                         (unsigned long long)denominator);
                        } else
                                snprintf(result, maxlen, "Unknown");
                }
                break;

        case STATUSTYPE_TABLE:
                /*
                 * kdevname returns a static pointer so we need
                 * to make private copies if the output is to
                 * make sense.
                 */
                snprintf(result, maxlen, "%s %s %s %llu",
                         snap->origin->name, snap->store->cow->name,
                         snap->store->type->name,
                         (unsigned long long)snap->store->chunk_size);
                break;
        }

        return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED, first = 0;
        struct dm_snapshot *snap;
        struct dm_snap_exception *e;
        struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
        chunk_t chunk;
        LIST_HEAD(pe_queue);

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap->store, bio->bi_sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 *
                 * ref_count is initialised to 1 so pending_complete()
                 * won't destroy the primary_pe while we're inside this loop.
                 */
                e = lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __lookup_pending_exception(snap, chunk);
                if (!pe) {
                        up_write(&snap->lock);
                        pe = alloc_pending_exception(snap);
                        down_write(&snap->lock);

                        if (!snap->valid) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        e = lookup_exception(&snap->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        pe = __find_pending_exception(snap, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(snap, -ENOMEM);
                                goto next_snapshot;
                        }
                }

                if (!primary_pe) {
                        /*
                         * Either every pe here has same
                         * primary_pe or none has one yet.
                         */
                        if (pe->primary_pe)
                                primary_pe = pe->primary_pe;
                        else {
                                primary_pe = pe;
                                first = 1;
                        }

                        bio_list_add(&primary_pe->origin_bios, bio);

                        r = DM_MAPIO_SUBMITTED;
                }

                if (!pe->primary_pe) {
                        pe->primary_pe = primary_pe;
                        get_pending_exception(primary_pe);
                }

                if (!pe->started) {
                        pe->started = 1;
                        list_add_tail(&pe->list, &pe_queue);
                }

next_snapshot:
                up_write(&snap->lock);
        }

        if (!primary_pe)
                return r;

        /*
         * If this is the first time we're processing this chunk and
         * ref_count is now 1 it means all the pending exceptions
         * got completed while we were in the loop above, so it falls to
         * us here to remove the primary_pe and submit any origin_bios.
         */
        if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
                flush_bios(bio_list_get(&primary_pe->origin_bios));
                free_pending_exception(primary_pe);
                /* If we got here, pe_queue is necessarily empty. */
                return r;
        }

        /*
         * Now that we have a complete pe list we can start the copying.
         */
        list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
                start_copy(pe);

        return r;
}
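
/*
 * Summary of the primary_pe protocol used above: one pe per origin
 * write becomes the primary and holds the origin bio(s); every sibling
 * pe (one per snapshot that needs a copy of this chunk) takes a
 * reference on it.  The last sibling to finish, in
 * put_pending_exception(), releases the origin bios, so the write to
 * the origin proceeds only after every snapshot has its copy.
 */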

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio);
        up_read(&_origins_lock);

        return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], 0, ti->len,
                          dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;

        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
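
/*
 * min_not_zero() picks the smaller of two values while treating zero
 * as "unset", e.g. min_not_zero(0, 8) == 8 and min_not_zero(4, 8) == 4.
 * Note that both arguments may be evaluated more than once.
 */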

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        struct dm_snapshot *snap;
        struct origin *o;
        chunk_t chunk_size = 0;

        down_read(&_origins_lock);
        o = __lookup_origin(dev->bdev);
        if (o)
                list_for_each_entry (snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);
        up_read(&_origins_lock);

        ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
                         unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }

        return 0;
}

static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 6, 0},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
};

static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 6, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_exception_store_init();
        if (r) {
                DMERR("Failed to initialize exception stores");
                return r;
        }

        r = dm_register_target(&snapshot_target);
        if (r) {
                DMERR("snapshot target register failed %d", r);
                return r;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad1;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad2;
        }

        exception_cache = KMEM_CACHE(dm_snap_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad3;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad4;
        }

        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
        if (!tracked_chunk_cache) {
                DMERR("Couldn't create cache to track chunks in use.");
                r = -ENOMEM;
                goto bad5;
        }

        ksnapd = create_singlethread_workqueue("ksnapd");
        if (!ksnapd) {
                DMERR("Failed to create ksnapd workqueue.");
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        return 0;

bad_pending_pool:
        kmem_cache_destroy(tracked_chunk_cache);
bad5:
        kmem_cache_destroy(pending_cache);
bad4:
        kmem_cache_destroy(exception_cache);
bad3:
        exit_origin_hash();
bad2:
        dm_unregister_target(&origin_target);
bad1:
        dm_unregister_target(&snapshot_target);
        return r;
}

static void __exit dm_snapshot_exit(void)
{
        destroy_workqueue(ksnapd);

        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);

        exit_origin_hash();
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
        kmem_cache_destroy(tracked_chunk_cache);

        dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");