dm-snap.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
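
/*
 * Reads that snapshot_map() redirects to the origin device are recorded
 * here for the duration of the I/O, so that pending_complete() can wait
 * for any conflicting read on a chunk to finish before it publishes the
 * completed exception (see __chunk_is_tracked()).
 */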
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK 0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
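/*
 * Because the low hash_shift bits are ignored, a chunk and its neighbours
 * within the same 2^hash_shift-aligned group fall into the same bucket.
 * That lets insert_completed_exception() merge adjacent mappings into a
 * single dm_snap_exception covering old_chunk plus dm_consecutive_chunk_count()
 * further consecutive chunks, and lets lookup_exception() find any chunk
 * of such a run in the one bucket it searches.
 */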
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
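
/*
 * snapshot_dtr() waits for pending_exceptions_count to drop to zero before
 * it destroys the mempool, so the mempool_free() below must not be
 * reordered after the counter decrement; the barrier pairs with the
 * smp_mb() in snapshot_dtr().
 */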
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);
	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
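
/*
 * Parse the chunk size argument, given in 512-byte sectors.  Zero is
 * accepted unchanged; any other value is silently rounded up to a whole
 * number of pages and must be a power of two and a multiple of the COW
 * device's hardware sector size.
 */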
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
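/*
 * Illustrative table line only (the device names and chunk size are made up):
 *
 *   echo "0 `blockdev --getsz /dev/vg/base` snapshot /dev/vg/base /dev/vg/cow P 16" | dmsetup create snap
 *
 * creates a persistent snapshot of /dev/vg/base, storing copied-out chunks
 * of 16 sectors (8KiB) on /dev/vg/cow.
 */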
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store, dm_add_exception, (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
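
/*
 * Called once the chunk has been copied (and, for persistent snapshots,
 * the metadata commit has completed).  On success the mapping is made
 * visible in the 'complete' exception table; on failure the snapshot is
 * invalidated.  In both cases the pending exception is removed and the
 * bios queued on it are released: snapshot bios are submitted or errored,
 * and any origin bios held by the primary pe are resubmitted.
 */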
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_pending_exception *pe, *pe2;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}
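
/*
 * Map I/O submitted to the snapshot device.  Chunks already remapped are
 * redirected to the COW device via remap_exception(); other reads go to
 * the origin and are tracked until they complete.  A write to a
 * not-yet-copied chunk is queued on a pending exception and only released
 * once the chunk has been copied to the COW device.
 */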
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			pe = __find_pending_exception(s, bio);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}
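
/*
 * Drop the read-tracking entry installed by snapshot_map() once a bio
 * redirected to the origin has completed.
 */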
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}
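
/*
 * snapshot_ctr() leaves s->active clear; only once the device is resumed
 * do writes to the origin start triggering exceptions for this snapshot
 * (see the check in __origin_write()).
 */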
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
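/*
 * Triggered by a write to the origin device.  Walk every valid, active
 * snapshot of this origin and make sure the chunk being written has a
 * pending (or completed) exception.  The bio is held on the primary pe's
 * origin_bios list and only resubmitted once every snapshot has its copy
 * of the old data (see pending_complete() and put_pending_exception()).
 */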
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			pe = __find_pending_exception(snap, bio);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.status = origin_status,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.end_io = snapshot_end_io,
	.resume = snapshot_resume,
	.status = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");