dm-snap-persistent.c

/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store. It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with a different disk version than the kernel will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
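
/*
 * Worked example (derived from the code below, assuming the default
 * 32-sector / 16 KiB chunk size): struct disk_exception is 16 bytes,
 * so each metadata area describes 16384 / 16 = 1024 data chunks and
 * the COW device is laid out as:
 *
 *	chunk 0			header
 *	chunk 1			metadata area 0
 *	chunks 2-1025		data chunks described by area 0
 *	chunk 1026		metadata area 1
 *	chunks 1027-2050	data chunks described by area 1
 *	...
 */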

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for header. The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;

	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}
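
/*
 * For example (assuming 4 KiB pages): PAGE_SIZE >> 9 is 8 sectors per
 * page, so the default 32-sector chunk maps to DIV_ROUND_UP(32, 8) = 4
 * pages, which is the value handed to dm_io_client_create() below.
 */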

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;
	memset(ps->zero_area, 0, len);

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;

	if (ps->header_area)
		vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->store->cow->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}
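
/*
 * For example, with exceptions_per_area == 1024 (the default 16 KiB
 * chunks), area 0 lives in chunk 1, area 1 in chunk 1026, area 2 in
 * chunk 2051, and so on: each metadata area is followed by the data
 * chunks it describes.
 */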

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, ps->area, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use the default chunk size (or the device's logical block
	 * size, if larger) if none was supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(ps->store->cow->bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->store->chunk_size = chunk_size;
	ps->store->chunk_mask = chunk_size - 1;
	ps->store->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->chunk_size;
	*denominator = get_dev_size(store->cow->bdev);
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now that we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * The metadata is valid, but the snapshot has been invalidated.
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps, callback, callback_context);
		if (r)
			return r;
	}

	return 0;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->cow->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
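
/*
 * The stride skip above keeps ps->next_free off the metadata chunks:
 * chunks whose index modulo (exceptions_per_area + 1) equals 1 are
 * exactly the ones returned by area_location() - e.g. 1, 1026, 2051, ...
 * with the default 1024 exceptions per area - so they are never handed
 * out as data chunks.
 */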

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array. This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	     zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_BARRIER))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
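
/*
 * Note on the commit path above: the metadata area is only written out
 * once the last in-flight exception completes (pending_count reaches
 * zero) or the area fills up, so a single area_io(WRITE_BARRIER) can
 * cover many commits; the queued callbacks are then run with ps->valid
 * reporting whether that write (and any wipe of the next area) succeeded.
 */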

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;

	case STATUSTYPE_TABLE:
		DMEMIT(" %s P %llu", store->cow->name,
		       (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}
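
/*
 * Illustrative only (not part of this file): a caller, typically the
 * snapshot target's module init/exit path, would be expected to pair
 * these registration calls roughly as:
 *
 *	r = dm_persistent_snapshot_init();
 *	if (r)
 *		return r;
 *	...
 *	dm_persistent_snapshot_exit();	// on a later failure or at module exit
 */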