/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32        /* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots; by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-sized blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots whose disk version differs from the kernel's will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
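
/*
 * For example, with the default 16KB chunk (32 sectors) and the
 * 16-byte struct disk_exception below, a metadata area holds
 * 16384 / 16 = 1024 entries, giving this layout:
 *
 *      chunk 0         header
 *      chunk 1         metadata area 0
 *      chunks 2-1025   exception data for area 0
 *      chunk 1026      metadata area 1
 *      ...and so on, with a stride of 1025 chunks per area.
 */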

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
        uint32_t magic;

        /*
         * Is this snapshot valid?  There is no way of recovering
         * an invalid snapshot.
         */
        uint32_t valid;

        /*
         * Simple, incrementing version.  No backward
         * compatibility.
         */
        uint32_t version;

        /* In sectors */
        uint32_t chunk_size;
};

struct disk_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

struct commit_callback {
        void (*callback)(void *, int success);
        void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
        struct dm_exception_store *store;
        int version;
        int valid;
        uint32_t exceptions_per_area;

        /*
         * Now that we have an asynchronous kcopyd there is no
         * need for large chunk sizes, so it won't hurt to have a
         * whole chunk's worth of metadata in memory at once.
         */
        void *area;

        /*
         * An area of zeros used to clear the next area.
         */
        void *zero_area;

        /*
         * Used to keep track of which metadata area the data in
         * 'area' refers to.
         */
        chunk_t current_area;

        /*
         * The next free chunk for an exception.
         */
        chunk_t next_free;

        /*
         * The index of the next free exception in the current
         * metadata area.
         */
        uint32_t current_committed;

        atomic_t pending_count;
        uint32_t callback_count;
        struct commit_callback *callbacks;
        struct dm_io_client *io_client;

        struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
        return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}
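
/*
 * e.g. with 4KB pages, PAGE_SIZE >> 9 is 8 sectors per page, so the
 * default 32-sector chunk needs DIV_ROUND_UP(32, 8) = 4 pages.
 */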

static int alloc_area(struct pstore *ps)
{
        int r = -ENOMEM;
        size_t len;

        len = ps->store->chunk_size << SECTOR_SHIFT;

        /*
         * Allocate the chunk_size block of memory that will hold
         * a single metadata area.
         */
        ps->area = vmalloc(len);
        if (!ps->area)
                return r;

        ps->zero_area = vmalloc(len);
        if (!ps->zero_area) {
                vfree(ps->area);
                return r;
        }
        memset(ps->zero_area, 0, len);

        return 0;
}

static void free_area(struct pstore *ps)
{
        if (ps->area)
                vfree(ps->area);
        ps->area = NULL;

        if (ps->zero_area)
                vfree(ps->zero_area);
        ps->zero_area = NULL;
}

struct mdata_req {
        struct dm_io_region *where;
        struct dm_io_request *io_req;
        struct work_struct work;
        int result;
};

static void do_metadata(struct work_struct *work)
{
        struct mdata_req *req = container_of(work, struct mdata_req, work);

        req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
        struct dm_io_region where = {
                .bdev = ps->store->cow->bdev,
                .sector = ps->store->chunk_size * chunk,
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = ps->area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
        struct mdata_req req;

        if (!metadata)
                return dm_io(&io_req, 1, &where, NULL);

        req.where = &where;
        req.io_req = &io_req;

        /*
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
        INIT_WORK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);

        return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
        return 1 + ((ps->exceptions_per_area + 1) * area);
}
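
/*
 * With 1024 exceptions per area, for instance, this maps area 0 to
 * chunk 1, area 1 to chunk 1026 and area 2 to chunk 2051: each area
 * occupies one metadata chunk followed by 1024 data chunks.
 */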

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
        int r;
        chunk_t chunk;

        chunk = area_location(ps, ps->current_area);

        r = chunk_io(ps, chunk, rw, 0);
        if (r)
                return r;

        return 0;
}

static void zero_memory_area(struct pstore *ps)
{
        memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
        struct dm_io_region where = {
                .bdev = ps->store->cow->bdev,
                .sector = ps->store->chunk_size * area_location(ps, area),
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_rw = WRITE,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = ps->zero_area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };

        return dm_io(&io_req, 1, &where, NULL);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
        int r;
        struct disk_header *dh;
        chunk_t chunk_size;
        int chunk_size_supplied = 1;

        /*
         * Use default chunk size (or hardsect_size, if larger) if none supplied
         */
        if (!ps->store->chunk_size) {
                ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
                    bdev_hardsect_size(ps->store->cow->bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
                ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
                chunk_size_supplied = 0;
        }

        ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->chunk_size));
        if (IS_ERR(ps->io_client))
                return PTR_ERR(ps->io_client);

        r = alloc_area(ps);
        if (r)
                return r;

        r = chunk_io(ps, 0, READ, 1);
        if (r)
                goto bad;

        dh = (struct disk_header *) ps->area;

        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
                return 0;
        }

        if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
                DMWARN("Invalid or corrupt snapshot");
                r = -ENXIO;
                goto bad;
        }

        *new_snapshot = 0;
        ps->valid = le32_to_cpu(dh->valid);
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);

        if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
                return 0;

        DMWARN("chunk size %llu in device metadata overrides "
               "table chunk size of %llu.",
               (unsigned long long)chunk_size,
               (unsigned long long)ps->store->chunk_size);

        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);

        ps->store->chunk_size = chunk_size;
        ps->store->chunk_mask = chunk_size - 1;
        ps->store->chunk_shift = ffs(chunk_size) - 1;

        r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
                                ps->io_client);
        if (r)
                return r;

        r = alloc_area(ps);
        return r;

bad:
        free_area(ps);
        return r;
}

static int write_header(struct pstore *ps)
{
        struct disk_header *dh;

        memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);

        dh = (struct disk_header *) ps->area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

        return chunk_io(ps, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
        BUG_ON(index >= ps->exceptions_per_area);

        return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
                           uint32_t index, struct disk_exception *result)
{
        struct disk_exception *e = get_exception(ps, index);

        /* copy it */
        result->old_chunk = le64_to_cpu(e->old_chunk);
        result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
                            uint32_t index, struct disk_exception *de)
{
        struct disk_exception *e = get_exception(ps, index);

        /* copy it */
        e->old_chunk = cpu_to_le64(de->old_chunk);
        e->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
                             int (*callback)(void *callback_context,
                                             chunk_t old, chunk_t new),
                             void *callback_context,
                             int *full)
{
        int r;
        unsigned int i;
        struct disk_exception de;

        /* presume the area is full */
        *full = 1;

        for (i = 0; i < ps->exceptions_per_area; i++) {
                read_exception(ps, i, &de);

                /*
                 * If the new_chunk is pointing at the start of
                 * the COW device, where the first metadata area
                 * is, we know that we've hit the end of the
                 * exceptions.  Therefore the area is not full.
                 */
                if (de.new_chunk == 0LL) {
                        ps->current_committed = i;
                        *full = 0;
                        break;
                }

                /*
                 * Keep track of the start of the free chunks.
                 */
                if (ps->next_free <= de.new_chunk)
                        ps->next_free = de.new_chunk + 1;

                /*
                 * Otherwise we add the exception to the snapshot.
                 */
                r = callback(callback_context, de.old_chunk, de.new_chunk);
                if (r)
                        return r;
        }

        return 0;
}

static int read_exceptions(struct pstore *ps,
                           int (*callback)(void *callback_context, chunk_t old,
                                           chunk_t new),
                           void *callback_context)
{
        int r, full = 1;

        /*
         * Keep reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (ps->current_area = 0; full; ps->current_area++) {
                r = area_io(ps, READ);
                if (r)
                        return r;

                r = insert_exceptions(ps, callback, callback_context, &full);
                if (r)
                        return r;
        }

        ps->current_area--;

        return 0;
}
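
/*
 * Note that the loop above leaves current_area pointing at the first
 * partially full area, which stays resident in ps->area so that
 * subsequent commits continue filling it.
 */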

static struct pstore *get_info(struct dm_exception_store *store)
{
        return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
                                     sector_t *numerator, sector_t *denominator)
{
        *numerator = get_info(store)->next_free * store->chunk_size;
        *denominator = get_dev_size(store->cow->bdev);
}

static void persistent_dtr(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        destroy_workqueue(ps->metadata_wq);

        /* Created in read_header */
        if (ps->io_client)
                dm_io_client_destroy(ps->io_client);
        free_area(ps);

        /* Allocated in persistent_read_metadata */
        if (ps->callbacks)
                vfree(ps->callbacks);

        kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
                                    int (*callback)(void *callback_context,
                                                    chunk_t old, chunk_t new),
                                    void *callback_context)
{
        int r, uninitialized_var(new_snapshot);
        struct pstore *ps = get_info(store);

        /*
         * Read the snapshot header.
         */
        r = read_header(ps, &new_snapshot);
        if (r)
                return r;

        /*
         * Now we know the correct chunk_size, complete the initialisation.
         */
        ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                                   sizeof(*ps->callbacks));
        if (!ps->callbacks)
                return -ENOMEM;

        /*
         * Do we need to set up a new snapshot?
         */
        if (new_snapshot) {
                r = write_header(ps);
                if (r) {
                        DMWARN("write_header failed");
                        return r;
                }

                ps->current_area = 0;
                zero_memory_area(ps);
                r = zero_disk_area(ps, 0);
                if (r) {
                        DMWARN("zero_disk_area(0) failed");
                        return r;
                }
        } else {
                /*
                 * Sanity checks.
                 */
                if (ps->version != SNAPSHOT_DISK_VERSION) {
                        DMWARN("unable to handle snapshot disk version %d",
                               ps->version);
                        return -EINVAL;
                }

                /*
                 * Metadata are valid, but the snapshot is invalidated
                 */
                if (!ps->valid)
                        return 1;

                /*
                 * Read the metadata.
                 */
                r = read_exceptions(ps, callback, callback_context);
                if (r)
                        return r;
        }

        return 0;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_snap_exception *e)
{
        struct pstore *ps = get_info(store);
        uint32_t stride;
        chunk_t next_free;
        sector_t size = get_dev_size(store->cow->bdev);

        /* Is there enough room? */
        if (size < ((ps->next_free + 1) * store->chunk_size))
                return -ENOSPC;

        e->new_chunk = ps->next_free;

        /*
         * Move on to the next free chunk, making sure to take
         * into account the location of the metadata chunks.
         */
        stride = (ps->exceptions_per_area + 1);
        next_free = ++ps->next_free;
        if (sector_div(next_free, stride) == 1)
                ps->next_free++;

        atomic_inc(&ps->pending_count);
        return 0;
}
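
/*
 * The sector_div() test above skips the metadata chunks: with a
 * stride of exceptions_per_area + 1, metadata areas live at chunks
 * 1, 1 + stride, 1 + 2 * stride, ..., i.e. wherever
 * next_free % stride == 1, so next_free is bumped past them.
 */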

static void persistent_commit_exception(struct dm_exception_store *store,
                                        struct dm_snap_exception *e,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
{
        unsigned int i;
        struct pstore *ps = get_info(store);
        struct disk_exception de;
        struct commit_callback *cb;

        de.old_chunk = e->old_chunk;
        de.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &de);

        /*
         * Add the callback to the back of the array.  This code
         * is the only place where the callback array is
         * manipulated, and we know that it will never be called
         * multiple times concurrently.
         */
        cb = ps->callbacks + ps->callback_count++;
        cb->callback = callback;
        cb->context = callback_context;

        /*
         * If there are exceptions in flight and we have not yet
         * filled this metadata area there's nothing more to do.
         */
        if (!atomic_dec_and_test(&ps->pending_count) &&
            (ps->current_committed != ps->exceptions_per_area))
                return;

        /*
         * If we completely filled the current area, then wipe the next one.
         */
        if ((ps->current_committed == ps->exceptions_per_area) &&
            zero_disk_area(ps, ps->current_area + 1))
                ps->valid = 0;

        /*
         * Commit exceptions to disk.
         */
        if (ps->valid && area_io(ps, WRITE))
                ps->valid = 0;

        /*
         * Advance to the next area if this one is full.
         */
        if (ps->current_committed == ps->exceptions_per_area) {
                ps->current_committed = 0;
                ps->current_area++;
                zero_memory_area(ps);
        }

        for (i = 0; i < ps->callback_count; i++) {
                cb = ps->callbacks + i;
                cb->callback(cb->context, ps->valid);
        }

        ps->callback_count = 0;
}
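
/*
 * Commits are therefore batched: callbacks accumulate until either the
 * last pending exception drains or the metadata area fills, at which
 * point one area write flushes the batch and every queued callback is
 * completed with the store's current validity.
 */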

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        ps->valid = 0;
        if (write_header(ps))
                DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
                          unsigned argc, char **argv)
{
        struct pstore *ps;

        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return -ENOMEM;

        ps->store = store;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
        ps->next_free = 2;      /* skipping the header and first area */
        ps->current_committed = 0;

        ps->callback_count = 0;
        atomic_set(&ps->pending_count, 0);
        ps->callbacks = NULL;

        ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
        if (!ps->metadata_wq) {
                kfree(ps);
                DMERR("couldn't start header metadata update thread");
                return -ENOMEM;
        }

        store->context = ps;

        return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
                                  status_type_t status, char *result,
                                  unsigned maxlen)
{
        unsigned sz = 0;

        switch (status) {
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
                DMEMIT(" %s P %llu", store->cow->name,
                       (unsigned long long)store->chunk_size);
        }

        return sz;
}

static struct dm_exception_store_type _persistent_type = {
        .name = "persistent",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .drop_snapshot = persistent_drop_snapshot,
        .fraction_full = persistent_fraction_full,
        .status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
        .name = "P",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .drop_snapshot = persistent_drop_snapshot,
        .fraction_full = persistent_fraction_full,
        .status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
        int r;

        r = dm_exception_store_type_register(&_persistent_type);
        if (r) {
                DMERR("Unable to register persistent exception store type");
                return r;
        }

        r = dm_exception_store_type_register(&_persistent_compat_type);
        if (r) {
                DMERR("Unable to register old-style persistent exception "
                      "store type");
                dm_exception_store_type_unregister(&_persistent_type);
                return r;
        }

        return r;
}

void dm_persistent_snapshot_exit(void)
{
        dm_exception_store_type_unregister(&_persistent_type);
        dm_exception_store_type_unregister(&_persistent_compat_type);
}