dm-log.c

/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the LGPL.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "dm-log.h"
#include "dm-io.h"

#define DM_MSG_PREFIX "mirror log"

static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
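
/*
 * Registered log types live on a simple list protected by a spinlock.
 * A type's use_count tracks how many live logs reference it, so its
 * module cannot be unloaded while still in use.
 */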
int dm_register_dirty_log_type(struct dirty_log_type *type)
{
        spin_lock(&_lock);
        type->use_count = 0;
        list_add(&type->list, &_log_types);
        spin_unlock(&_lock);

        return 0;
}

int dm_unregister_dirty_log_type(struct dirty_log_type *type)
{
        spin_lock(&_lock);

        if (type->use_count)
                DMWARN("Attempt to unregister a log type that is still in use");
        else
                list_del(&type->list);

        spin_unlock(&_lock);

        return 0;
}
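
/*
 * get_type() looks a log type up by name, pinning its module and
 * bumping use_count; put_type() drops that reference and releases the
 * module once the count reaches zero.
 */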
static struct dirty_log_type *get_type(const char *type_name)
{
        struct dirty_log_type *type;

        spin_lock(&_lock);
        list_for_each_entry (type, &_log_types, list)
                if (!strcmp(type_name, type->name)) {
                        if (!type->use_count && !try_module_get(type->module)) {
                                spin_unlock(&_lock);
                                return NULL;
                        }
                        type->use_count++;
                        spin_unlock(&_lock);
                        return type;
                }

        spin_unlock(&_lock);
        return NULL;
}

static void put_type(struct dirty_log_type *type)
{
        spin_lock(&_lock);
        if (!--type->use_count)
                module_put(type->module);
        spin_unlock(&_lock);
}
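
/*
 * Allocate a dirty_log, bind it to the named type and run that type's
 * constructor.  On any failure the allocation and the type reference
 * are released and NULL is returned.
 */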
struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
                                      unsigned int argc, char **argv)
{
        struct dirty_log_type *type;
        struct dirty_log *log;

        log = kmalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                return NULL;

        type = get_type(type_name);
        if (!type) {
                kfree(log);
                return NULL;
        }

        log->type = type;
        if (type->ctr(log, ti, argc, argv)) {
                kfree(log);
                put_type(type);
                return NULL;
        }

        return log;
}

void dm_destroy_dirty_log(struct dirty_log *log)
{
        log->type->dtr(log);
        put_type(log->type);
        kfree(log);
}
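
/*
 * Typical usage from a log client (sketch only; this call site is
 * hypothetical and not part of this file):
 *
 *      struct dirty_log *log;
 *
 *      log = dm_create_dirty_log("disk", ti, 2, log_argv);
 *      if (!log)
 *              return -EINVAL;
 *      ...
 *      dm_destroy_dirty_log(log);
 */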

/*-----------------------------------------------------------------
 * Persistent and core logs share a lot of their implementation.
 * FIXME: need a reload method to be called from a resume
 *---------------------------------------------------------------*/

/*
 * Magic for persistent mirrors: "MiRr"
 */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2

struct log_header {
        uint32_t magic;

        /*
         * Simple, incrementing version. no backward
         * compatibility.
         */
        uint32_t version;
        sector_t nr_regions;
};
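
/*
 * In-core state shared by the core and disk logs.  The clean, sync and
 * recovering bitsets each carry one bit per region; the disk log fields
 * at the end are unused by the core log.
 */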
struct log_c {
        struct dm_target *ti;
        int touched;
        uint32_t region_size;
        unsigned int region_count;
        region_t sync_count;

        unsigned bitset_uint32_count;
        uint32_t *clean_bits;
        uint32_t *sync_bits;
        uint32_t *recovering_bits;      /* FIXME: this seems excessive */

        int sync_search;

        /* Resync flag */
        enum sync {
                DEFAULTSYNC,    /* Synchronize if necessary */
                NOSYNC,         /* Devices known to be already in sync */
                FORCESYNC,      /* Force a sync to happen */
        } sync;

        struct dm_io_request io_req;

        /*
         * Disk log fields
         */
        int log_dev_failed;
        struct dm_dev *log_dev;
        struct log_header header;
        struct io_region header_location;
        struct log_header *disk_header;
};

/*
 * The touched member needs to be updated every time we access
 * one of the bitsets.
 */
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
        return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
}

static inline void log_set_bit(struct log_c *l,
                               uint32_t *bs, unsigned bit)
{
        ext2_set_bit(bit, (unsigned long *) bs);
        l->touched = 1;
}

static inline void log_clear_bit(struct log_c *l,
                                 uint32_t *bs, unsigned bit)
{
        ext2_clear_bit(bit, (unsigned long *) bs);
        l->touched = 1;
}

/*----------------------------------------------------------------
 * Header IO
 *--------------------------------------------------------------*/
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
        disk->magic = cpu_to_le32(core->magic);
        disk->version = cpu_to_le32(core->version);
        disk->nr_regions = cpu_to_le64(core->nr_regions);
}

static void header_from_disk(struct log_header *core, struct log_header *disk)
{
        core->magic = le32_to_cpu(disk->magic);
        core->version = le32_to_cpu(disk->version);
        core->nr_regions = le64_to_cpu(disk->nr_regions);
}
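
/*
 * rw_header() issues a synchronous dm-io request against the header
 * sectors (header plus in-line bitset); read_header() and
 * write_header() convert between the in-core and little-endian on-disk
 * layouts around it.
 */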
static int rw_header(struct log_c *lc, int rw)
{
        lc->io_req.bi_rw = rw;
        lc->io_req.mem.ptr.vma = lc->disk_header;
        lc->io_req.notify.fn = NULL;

        return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}

static int read_header(struct log_c *log)
{
        int r;

        r = rw_header(log, READ);
        if (r)
                return r;

        header_from_disk(&log->header, log->disk_header);

        /* New log required? */
        if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
                log->header.magic = MIRROR_MAGIC;
                log->header.version = MIRROR_DISK_VERSION;
                log->header.nr_regions = 0;
        }

#ifdef __LITTLE_ENDIAN
        if (log->header.version == 1)
                log->header.version = 2;
#endif

        if (log->header.version != MIRROR_DISK_VERSION) {
                DMWARN("incompatible disk log version");
                return -EINVAL;
        }

        return 0;
}

static inline int write_header(struct log_c *log)
{
        header_to_disk(&log->header, log->disk_header);
        return rw_header(log, WRITE);
}

/*----------------------------------------------------------------
 * core log constructor/destructor
 *
 * argv contains region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
#define BYTE_SHIFT 3
static int create_log_context(struct dirty_log *log, struct dm_target *ti,
                              unsigned int argc, char **argv,
                              struct dm_dev *dev)
{
        enum sync sync = DEFAULTSYNC;

        struct log_c *lc;
        uint32_t region_size;
        unsigned int region_count;
        size_t bitset_size, buf_size;
        int r;

        if (argc < 1 || argc > 2) {
                DMWARN("wrong number of arguments to mirror log");
                return -EINVAL;
        }

        if (argc > 1) {
                if (!strcmp(argv[1], "sync"))
                        sync = FORCESYNC;
                else if (!strcmp(argv[1], "nosync"))
                        sync = NOSYNC;
                else {
                        DMWARN("unrecognised sync argument to mirror log: %s",
                               argv[1]);
                        return -EINVAL;
                }
        }

        if (sscanf(argv[0], "%u", &region_size) != 1) {
                DMWARN("invalid region size string");
                return -EINVAL;
        }

        region_count = dm_sector_div_up(ti->len, region_size);

        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
        if (!lc) {
                DMWARN("couldn't allocate core log");
                return -ENOMEM;
        }

        lc->ti = ti;
        lc->touched = 0;
        lc->region_size = region_size;
        lc->region_count = region_count;
        lc->sync = sync;

        /*
         * Work out how many "uint32_t"s we need to hold the bitset.
         */
        bitset_size = dm_round_up(region_count,
                                  sizeof(*lc->clean_bits) << BYTE_SHIFT);
        bitset_size >>= BYTE_SHIFT;

        lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);

        /*
         * Disk log?
         */
        if (!dev) {
                lc->clean_bits = vmalloc(bitset_size);
                if (!lc->clean_bits) {
                        DMWARN("couldn't allocate clean bitset");
                        kfree(lc);
                        return -ENOMEM;
                }
                lc->disk_header = NULL;
        } else {
                lc->log_dev = dev;
                lc->log_dev_failed = 0;
                lc->header_location.bdev = lc->log_dev->bdev;
                lc->header_location.sector = 0;

                /*
                 * Buffer holds both header and bitset.
                 */
                buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
                                       bitset_size, ti->limits.hardsect_size);
                lc->header_location.count = buf_size >> SECTOR_SHIFT;

                lc->io_req.mem.type = DM_IO_VMA;
                lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
                                                                   PAGE_SIZE));
                if (IS_ERR(lc->io_req.client)) {
                        r = PTR_ERR(lc->io_req.client);
                        DMWARN("couldn't allocate disk io client");
                        kfree(lc);
                        return r;
                }

                lc->disk_header = vmalloc(buf_size);
                if (!lc->disk_header) {
                        DMWARN("couldn't allocate disk log buffer");
                        kfree(lc);
                        return -ENOMEM;
                }

                lc->clean_bits = (void *)lc->disk_header +
                                 (LOG_OFFSET << SECTOR_SHIFT);
        }

        memset(lc->clean_bits, -1, bitset_size);

        lc->sync_bits = vmalloc(bitset_size);
        if (!lc->sync_bits) {
                DMWARN("couldn't allocate sync bitset");
                if (!dev)
                        vfree(lc->clean_bits);
                vfree(lc->disk_header);
                kfree(lc);
                return -ENOMEM;
        }
        memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
        lc->sync_count = (sync == NOSYNC) ? region_count : 0;

        lc->recovering_bits = vmalloc(bitset_size);
        if (!lc->recovering_bits) {
                DMWARN("couldn't allocate recovering bitset");
                vfree(lc->sync_bits);
                if (!dev)
                        vfree(lc->clean_bits);
                vfree(lc->disk_header);
                kfree(lc);
                return -ENOMEM;
        }
        memset(lc->recovering_bits, 0, bitset_size);
        lc->sync_search = 0;
        log->context = lc;

        return 0;
}

static int core_ctr(struct dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
{
        return create_log_context(log, ti, argc, argv, NULL);
}

static void destroy_log_context(struct log_c *lc)
{
        vfree(lc->sync_bits);
        vfree(lc->recovering_bits);
        kfree(lc);
}

static void core_dtr(struct dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;

        vfree(lc->clean_bits);
        destroy_log_context(lc);
}

/*----------------------------------------------------------------
 * disk log constructor/destructor
 *
 * argv contains log_device region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc < 2 || argc > 3) {
                DMWARN("wrong number of arguments to disk mirror log");
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
                          FMODE_READ | FMODE_WRITE, &dev);
        if (r)
                return r;

        r = create_log_context(log, ti, argc - 1, argv + 1, dev);
        if (r) {
                dm_put_device(ti, dev);
                return r;
        }

        return 0;
}

static void disk_dtr(struct dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;

        dm_put_device(lc->ti, lc->log_dev);
        vfree(lc->disk_header);
        dm_io_client_destroy(lc->io_req.client);
        destroy_log_context(lc);
}
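
/*
 * Count the set bits in an array of 'size' 32-bit words.
 */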
static int count_bits32(uint32_t *addr, unsigned size)
{
        int count = 0, i;

        for (i = 0; i < size; i++) {
                count += hweight32(*(addr+i));
        }
        return count;
}
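
/*
 * Flag the log device as failed (once) and raise a table event so
 * userspace can react.
 */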
static void fail_log_device(struct log_c *lc)
{
        if (lc->log_dev_failed)
                return;

        lc->log_dev_failed = 1;
        dm_table_event(lc->ti->table);
}
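
/*
 * disk_resume() re-reads the on-disk header, adjusts the clean bitset
 * for any change in region count since the log was last written,
 * rebuilds the sync bitset from it and writes a fresh header back out.
 */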
static int disk_resume(struct dirty_log *log)
{
        int r;
        unsigned i;
        struct log_c *lc = (struct log_c *) log->context;
        size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

        /* read the disk header */
        r = read_header(lc);
        if (r) {
                DMWARN("%s: Failed to read header on mirror log device",
                       lc->log_dev->name);
                fail_log_device(lc);
                /*
                 * If the log device cannot be read, we must assume
                 * all regions are out-of-sync.  If we simply return
                 * here, the state will be uninitialized and could
                 * lead us to return 'in-sync' status for regions
                 * that are actually 'out-of-sync'.
                 */
                lc->header.nr_regions = 0;
        }

        /* set or clear any new bits -- device has grown */
        if (lc->sync == NOSYNC)
                for (i = lc->header.nr_regions; i < lc->region_count; i++)
                        /* FIXME: amazingly inefficient */
                        log_set_bit(lc, lc->clean_bits, i);
        else
                for (i = lc->header.nr_regions; i < lc->region_count; i++)
                        /* FIXME: amazingly inefficient */
                        log_clear_bit(lc, lc->clean_bits, i);

        /* clear any old bits -- device has shrunk */
        for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
                log_clear_bit(lc, lc->clean_bits, i);

        /* copy clean across to sync */
        memcpy(lc->sync_bits, lc->clean_bits, size);
        lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
        lc->sync_search = 0;

        /* set the correct number of regions in the header */
        lc->header.nr_regions = lc->region_count;

        /* write the new header */
        r = write_header(lc);
        if (r) {
                DMWARN("%s: Failed to write header on mirror log device",
                       lc->log_dev->name);
                fail_log_device(lc);
        }

        return r;
}
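
/*
 * dirty_log_type method implementations.  The core log keeps all state
 * in memory; the disk log reuses these and adds flush/resume handlers
 * that persist the bitset through the header device.
 */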
static uint32_t core_get_region_size(struct dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;
        return lc->region_size;
}

static int core_resume(struct dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;
        lc->sync_search = 0;
        return 0;
}

static int core_is_clean(struct dirty_log *log, region_t region)
{
        struct log_c *lc = (struct log_c *) log->context;
        return log_test_bit(lc->clean_bits, region);
}

static int core_in_sync(struct dirty_log *log, region_t region, int block)
{
        struct log_c *lc = (struct log_c *) log->context;
        return log_test_bit(lc->sync_bits, region);
}

static int core_flush(struct dirty_log *log)
{
        /* no op */
        return 0;
}

static int disk_flush(struct dirty_log *log)
{
        int r;
        struct log_c *lc = (struct log_c *) log->context;

        /* only write if the log has changed */
        if (!lc->touched)
                return 0;

        r = write_header(lc);
        if (r)
                fail_log_device(lc);
        else
                lc->touched = 0;

        return r;
}

static void core_mark_region(struct dirty_log *log, region_t region)
{
        struct log_c *lc = (struct log_c *) log->context;
        log_clear_bit(lc, lc->clean_bits, region);
}

static void core_clear_region(struct dirty_log *log, region_t region)
{
        struct log_c *lc = (struct log_c *) log->context;
        log_set_bit(lc, lc->clean_bits, region);
}

static int core_get_resync_work(struct dirty_log *log, region_t *region)
{
        struct log_c *lc = (struct log_c *) log->context;

        if (lc->sync_search >= lc->region_count)
                return 0;

        do {
                *region = ext2_find_next_zero_bit(
                                        (unsigned long *) lc->sync_bits,
                                        lc->region_count,
                                        lc->sync_search);
                lc->sync_search = *region + 1;

                if (*region >= lc->region_count)
                        return 0;

        } while (log_test_bit(lc->recovering_bits, *region));

        log_set_bit(lc, lc->recovering_bits, *region);
        return 1;
}

static void core_set_region_sync(struct dirty_log *log, region_t region,
                                 int in_sync)
{
        struct log_c *lc = (struct log_c *) log->context;

        log_clear_bit(lc, lc->recovering_bits, region);
        if (in_sync) {
                log_set_bit(lc, lc->sync_bits, region);
                lc->sync_count++;
        } else if (log_test_bit(lc->sync_bits, region)) {
                lc->sync_count--;
                log_clear_bit(lc, lc->sync_bits, region);
        }
}

static region_t core_get_sync_count(struct dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;

        return lc->sync_count;
}
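
/*
 * Status reporting.  DMEMIT_SYNC appends the optional [no]sync argument
 * only when it was explicitly requested, which is why the argument
 * counts below switch between 1/2 (core) and 2/3 (disk).
 */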
#define DMEMIT_SYNC \
        if (lc->sync != DEFAULTSYNC) \
                DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")

static int core_status(struct dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
{
        int sz = 0;
        struct log_c *lc = log->context;

        switch(status) {
        case STATUSTYPE_INFO:
                DMEMIT("1 %s", log->type->name);
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %u %u ", log->type->name,
                       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
                DMEMIT_SYNC;
        }

        return sz;
}

static int disk_status(struct dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
{
        int sz = 0;
        struct log_c *lc = log->context;

        switch(status) {
        case STATUSTYPE_INFO:
                DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
                       lc->log_dev_failed ? 'D' : 'A');
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %u %s %u ", log->type->name,
                       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
                       lc->region_size);
                DMEMIT_SYNC;
        }

        return sz;
}
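
/*
 * The two built-in log types: "core" keeps the region bitmaps in memory
 * only, "disk" additionally persists them on a dedicated log device.
 */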
static struct dirty_log_type _core_type = {
        .name = "core",
        .module = THIS_MODULE,
        .ctr = core_ctr,
        .dtr = core_dtr,
        .resume = core_resume,
        .get_region_size = core_get_region_size,
        .is_clean = core_is_clean,
        .in_sync = core_in_sync,
        .flush = core_flush,
        .mark_region = core_mark_region,
        .clear_region = core_clear_region,
        .get_resync_work = core_get_resync_work,
        .set_region_sync = core_set_region_sync,
        .get_sync_count = core_get_sync_count,
        .status = core_status,
};

static struct dirty_log_type _disk_type = {
        .name = "disk",
        .module = THIS_MODULE,
        .ctr = disk_ctr,
        .dtr = disk_dtr,
        .suspend = disk_flush,
        .resume = disk_resume,
        .get_region_size = core_get_region_size,
        .is_clean = core_is_clean,
        .in_sync = core_in_sync,
        .flush = disk_flush,
        .mark_region = core_mark_region,
        .clear_region = core_clear_region,
        .get_resync_work = core_get_resync_work,
        .set_region_sync = core_set_region_sync,
        .get_sync_count = core_get_sync_count,
        .status = disk_status,
};

int __init dm_dirty_log_init(void)
{
        int r;

        r = dm_register_dirty_log_type(&_core_type);
        if (r)
                DMWARN("couldn't register core log");

        r = dm_register_dirty_log_type(&_disk_type);
        if (r) {
                DMWARN("couldn't register disk type");
                dm_unregister_dirty_log_type(&_core_type);
        }

        return r;
}

void dm_dirty_log_exit(void)
{
        dm_unregister_dirty_log_type(&_disk_type);
        dm_unregister_dirty_log_type(&_core_type);
}

EXPORT_SYMBOL(dm_register_dirty_log_type);
EXPORT_SYMBOL(dm_unregister_dirty_log_type);
EXPORT_SYMBOL(dm_create_dirty_log);
EXPORT_SYMBOL(dm_destroy_dirty_log);