dm-log.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788
  1. /*
  2. * Copyright (C) 2003 Sistina Software
  3. *
  4. * This file is released under the LGPL.
  5. */
  6. #include <linux/init.h>
  7. #include <linux/slab.h>
  8. #include <linux/module.h>
  9. #include <linux/vmalloc.h>
  10. #include "dm-log.h"
  11. #include "dm-io.h"
  12. #define DM_MSG_PREFIX "mirror log"
/* Registry of available dirty log types; both protected by _lock. */
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
  15. int dm_register_dirty_log_type(struct dirty_log_type *type)
  16. {
  17. spin_lock(&_lock);
  18. type->use_count = 0;
  19. list_add(&type->list, &_log_types);
  20. spin_unlock(&_lock);
  21. return 0;
  22. }
  23. int dm_unregister_dirty_log_type(struct dirty_log_type *type)
  24. {
  25. spin_lock(&_lock);
  26. if (type->use_count)
  27. DMWARN("Attempt to unregister a log type that is still in use");
  28. else
  29. list_del(&type->list);
  30. spin_unlock(&_lock);
  31. return 0;
  32. }
  33. static struct dirty_log_type *_get_type(const char *type_name)
  34. {
  35. struct dirty_log_type *type;
  36. spin_lock(&_lock);
  37. list_for_each_entry (type, &_log_types, list)
  38. if (!strcmp(type_name, type->name)) {
  39. if (!type->use_count && !try_module_get(type->module)){
  40. spin_unlock(&_lock);
  41. return NULL;
  42. }
  43. type->use_count++;
  44. spin_unlock(&_lock);
  45. return type;
  46. }
  47. spin_unlock(&_lock);
  48. return NULL;
  49. }
  50. /*
  51. * get_type
  52. * @type_name
  53. *
  54. * Attempt to retrieve the dirty_log_type by name. If not already
  55. * available, attempt to load the appropriate module.
  56. *
  57. * Log modules are named "dm-log-" followed by the 'type_name'.
  58. * Modules may contain multiple types.
  59. * This function will first try the module "dm-log-<type_name>",
  60. * then truncate 'type_name' on the last '-' and try again.
  61. *
  62. * For example, if type_name was "clustered-disk", it would search
  63. * 'dm-log-clustered-disk' then 'dm-log-clustered'.
  64. *
  65. * Returns: dirty_log_type* on success, NULL on failure
  66. */
  67. static struct dirty_log_type *get_type(const char *type_name)
  68. {
  69. char *p, *type_name_dup;
  70. struct dirty_log_type *type;
  71. type = _get_type(type_name);
  72. if (type)
  73. return type;
  74. type_name_dup = kstrdup(type_name, GFP_KERNEL);
  75. if (!type_name_dup) {
  76. DMWARN("No memory left to attempt log module load for \"%s\"",
  77. type_name);
  78. return NULL;
  79. }
  80. while (request_module("dm-log-%s", type_name_dup) ||
  81. !(type = _get_type(type_name))) {
  82. p = strrchr(type_name_dup, '-');
  83. if (!p)
  84. break;
  85. p[0] = '\0';
  86. }
  87. if (!type)
  88. DMWARN("Module for logging type \"%s\" not found.", type_name);
  89. kfree(type_name_dup);
  90. return type;
  91. }
  92. static void put_type(struct dirty_log_type *type)
  93. {
  94. spin_lock(&_lock);
  95. if (!--type->use_count)
  96. module_put(type->module);
  97. spin_unlock(&_lock);
  98. }
  99. struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
  100. unsigned int argc, char **argv)
  101. {
  102. struct dirty_log_type *type;
  103. struct dirty_log *log;
  104. log = kmalloc(sizeof(*log), GFP_KERNEL);
  105. if (!log)
  106. return NULL;
  107. type = get_type(type_name);
  108. if (!type) {
  109. kfree(log);
  110. return NULL;
  111. }
  112. log->type = type;
  113. if (type->ctr(log, ti, argc, argv)) {
  114. kfree(log);
  115. put_type(type);
  116. return NULL;
  117. }
  118. return log;
  119. }
/*
 * Tear down a log created by dm_create_dirty_log(): run the type's
 * destructor, drop the type/module reference, then free the handle.
 */
void dm_destroy_dirty_log(struct dirty_log *log)
{
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
}
  126. /*-----------------------------------------------------------------
  127. * Persistent and core logs share a lot of their implementation.
  128. * FIXME: need a reload method to be called from a resume
  129. *---------------------------------------------------------------*/
  130. /*
  131. * Magic for persistent mirrors: "MiRr"
  132. */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.  Version 2 headers are stored
 * little-endian (see header_to_disk()/header_from_disk()).
 */
#define MIRROR_DISK_VERSION 2

/* Sector offset of the clean bitset within the disk-log I/O buffer. */
#define LOG_OFFSET 2
/* Log header as kept in core; byte-swapped for disk by the helpers below. */
struct log_header {
	uint32_t magic;		/* MIRROR_MAGIC */

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;
	sector_t nr_regions;	/* region count the bitsets were sized for */
};
/* Per-log state shared by the core and disk log implementations. */
struct log_c {
	struct dm_target *ti;
	int touched;		/* a bitset changed since the last flush */
	uint32_t region_size;	/* in sectors (ti->len is divided by it) */
	unsigned int region_count;
	region_t sync_count;	/* number of regions marked in-sync */

	unsigned bitset_uint32_count;	/* uint32_t words per bitset */
	uint32_t *clean_bits;	/* disk log: points inside disk_header */
	uint32_t *sync_bits;
	uint32_t *recovering_bits;	/* FIXME: this seems excessive */

	int sync_search;	/* scan resume point for get_resync_work */

	/* Resync flag */
	enum sync {
		DEFAULTSYNC,	/* Synchronize if necessary */
		NOSYNC,		/* Devices known to be already in sync */
		FORCESYNC,	/* Force a sync to happen */
	} sync;

	struct dm_io_request io_req;

	/*
	 * Disk log fields
	 */
	int log_dev_failed;
	struct dm_dev *log_dev;
	struct log_header header;	/* in-core copy of the header */
	struct io_region header_location;
	struct log_header *disk_header;	/* I/O buffer: header + bitset */
};
  175. /*
  176. * The touched member needs to be updated every time we access
  177. * one of the bitsets.
  178. */
  179. static inline int log_test_bit(uint32_t *bs, unsigned bit)
  180. {
  181. return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
  182. }
/* Set @bit in @bs and mark the log dirty so the next flush writes it. */
static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned bit)
{
	ext2_set_bit(bit, (unsigned long *) bs);
	l->touched = 1;
}
/* Clear @bit in @bs and mark the log dirty so the next flush writes it. */
static inline void log_clear_bit(struct log_c *l,
				 uint32_t *bs, unsigned bit)
{
	ext2_clear_bit(bit, (unsigned long *) bs);
	l->touched = 1;
}
  195. /*----------------------------------------------------------------
  196. * Header IO
  197. *--------------------------------------------------------------*/
/* Convert the in-core header to its little-endian on-disk layout. */
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
	disk->magic = cpu_to_le32(core->magic);
	disk->version = cpu_to_le32(core->version);
	disk->nr_regions = cpu_to_le64(core->nr_regions);
}
/* Convert a little-endian on-disk header into core byte order. */
static void header_from_disk(struct log_header *core, struct log_header *disk)
{
	core->magic = le32_to_cpu(disk->magic);
	core->version = le32_to_cpu(disk->version);
	core->nr_regions = le64_to_cpu(disk->nr_regions);
}
/*
 * Issue I/O (@rw is READ or WRITE) on the header buffer at
 * lc->header_location.  Returns 0 or a dm_io error code.
 */
static int rw_header(struct log_c *lc, int rw)
{
	lc->io_req.bi_rw = rw;
	lc->io_req.mem.ptr.vma = lc->disk_header;
	lc->io_req.notify.fn = NULL;	/* no completion callback */

	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
/*
 * Read and validate the on-disk header into log->header.  A missing
 * magic, or an explicit [no]sync request, resets the header so a new
 * log is initialized.  Returns 0, an I/O error, or -EINVAL for an
 * incompatible version.
 */
static int read_header(struct log_c *log)
{
	int r;

	r = rw_header(log, READ);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

#ifdef __LITTLE_ENDIAN
	/*
	 * Accept version 1 headers on little-endian hosts by upgrading
	 * them in place — presumably v1 was written in native byte
	 * order, which matches the v2 layout there; TODO confirm.
	 */
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
/* Serialize the in-core header and write it to the log device. */
static inline int write_header(struct log_c *log)
{
	header_to_disk(&log->header, log->disk_header);

	return rw_header(log, WRITE);
}
  245. /*----------------------------------------------------------------
  246. * core log constructor/destructor
  247. *
  248. * argv contains region_size followed optionally by [no]sync
  249. *--------------------------------------------------------------*/
  250. #define BYTE_SHIFT 3
  251. static int create_log_context(struct dirty_log *log, struct dm_target *ti,
  252. unsigned int argc, char **argv,
  253. struct dm_dev *dev)
  254. {
  255. enum sync sync = DEFAULTSYNC;
  256. struct log_c *lc;
  257. uint32_t region_size;
  258. unsigned int region_count;
  259. size_t bitset_size, buf_size;
  260. int r;
  261. if (argc < 1 || argc > 2) {
  262. DMWARN("wrong number of arguments to mirror log");
  263. return -EINVAL;
  264. }
  265. if (argc > 1) {
  266. if (!strcmp(argv[1], "sync"))
  267. sync = FORCESYNC;
  268. else if (!strcmp(argv[1], "nosync"))
  269. sync = NOSYNC;
  270. else {
  271. DMWARN("unrecognised sync argument to mirror log: %s",
  272. argv[1]);
  273. return -EINVAL;
  274. }
  275. }
  276. if (sscanf(argv[0], "%u", &region_size) != 1) {
  277. DMWARN("invalid region size string");
  278. return -EINVAL;
  279. }
  280. region_count = dm_sector_div_up(ti->len, region_size);
  281. lc = kmalloc(sizeof(*lc), GFP_KERNEL);
  282. if (!lc) {
  283. DMWARN("couldn't allocate core log");
  284. return -ENOMEM;
  285. }
  286. lc->ti = ti;
  287. lc->touched = 0;
  288. lc->region_size = region_size;
  289. lc->region_count = region_count;
  290. lc->sync = sync;
  291. /*
  292. * Work out how many "unsigned long"s we need to hold the bitset.
  293. */
  294. bitset_size = dm_round_up(region_count,
  295. sizeof(*lc->clean_bits) << BYTE_SHIFT);
  296. bitset_size >>= BYTE_SHIFT;
  297. lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
  298. /*
  299. * Disk log?
  300. */
  301. if (!dev) {
  302. lc->clean_bits = vmalloc(bitset_size);
  303. if (!lc->clean_bits) {
  304. DMWARN("couldn't allocate clean bitset");
  305. kfree(lc);
  306. return -ENOMEM;
  307. }
  308. lc->disk_header = NULL;
  309. } else {
  310. lc->log_dev = dev;
  311. lc->log_dev_failed = 0;
  312. lc->header_location.bdev = lc->log_dev->bdev;
  313. lc->header_location.sector = 0;
  314. /*
  315. * Buffer holds both header and bitset.
  316. */
  317. buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
  318. bitset_size, ti->limits.hardsect_size);
  319. lc->header_location.count = buf_size >> SECTOR_SHIFT;
  320. lc->io_req.mem.type = DM_IO_VMA;
  321. lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
  322. PAGE_SIZE));
  323. if (IS_ERR(lc->io_req.client)) {
  324. r = PTR_ERR(lc->io_req.client);
  325. DMWARN("couldn't allocate disk io client");
  326. kfree(lc);
  327. return -ENOMEM;
  328. }
  329. lc->disk_header = vmalloc(buf_size);
  330. if (!lc->disk_header) {
  331. DMWARN("couldn't allocate disk log buffer");
  332. kfree(lc);
  333. return -ENOMEM;
  334. }
  335. lc->clean_bits = (void *)lc->disk_header +
  336. (LOG_OFFSET << SECTOR_SHIFT);
  337. }
  338. memset(lc->clean_bits, -1, bitset_size);
  339. lc->sync_bits = vmalloc(bitset_size);
  340. if (!lc->sync_bits) {
  341. DMWARN("couldn't allocate sync bitset");
  342. if (!dev)
  343. vfree(lc->clean_bits);
  344. vfree(lc->disk_header);
  345. kfree(lc);
  346. return -ENOMEM;
  347. }
  348. memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
  349. lc->sync_count = (sync == NOSYNC) ? region_count : 0;
  350. lc->recovering_bits = vmalloc(bitset_size);
  351. if (!lc->recovering_bits) {
  352. DMWARN("couldn't allocate sync bitset");
  353. vfree(lc->sync_bits);
  354. if (!dev)
  355. vfree(lc->clean_bits);
  356. vfree(lc->disk_header);
  357. kfree(lc);
  358. return -ENOMEM;
  359. }
  360. memset(lc->recovering_bits, 0, bitset_size);
  361. lc->sync_search = 0;
  362. log->context = lc;
  363. return 0;
  364. }
/*
 * Core (in-memory only) log constructor: the common constructor with
 * no backing device.
 */
static int core_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	return create_log_context(log, ti, argc, argv, NULL);
}
/*
 * Free state common to core and disk logs.  clean_bits is NOT freed
 * here: the core log frees it separately (core_dtr) and the disk
 * log's copy lives inside disk_header.
 */
static void destroy_log_context(struct log_c *lc)
{
	vfree(lc->sync_bits);
	vfree(lc->recovering_bits);
	kfree(lc);
}
/* Core log destructor: clean_bits is a separate vmalloc for core logs. */
static void core_dtr(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	vfree(lc->clean_bits);
	destroy_log_context(lc);
}
  382. /*----------------------------------------------------------------
  383. * disk log constructor/destructor
  384. *
  385. * argv contains log_device region_size followed optionally by [no]sync
  386. *--------------------------------------------------------------*/
  387. static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
  388. unsigned int argc, char **argv)
  389. {
  390. int r;
  391. struct dm_dev *dev;
  392. if (argc < 2 || argc > 3) {
  393. DMWARN("wrong number of arguments to disk mirror log");
  394. return -EINVAL;
  395. }
  396. r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
  397. FMODE_READ | FMODE_WRITE, &dev);
  398. if (r)
  399. return r;
  400. r = create_log_context(log, ti, argc - 1, argv + 1, dev);
  401. if (r) {
  402. dm_put_device(ti, dev);
  403. return r;
  404. }
  405. return 0;
  406. }
/*
 * Disk log destructor: release the log device, the I/O buffer (which
 * also holds clean_bits), the io client, then the shared state.
 */
static void disk_dtr(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	dm_put_device(lc->ti, lc->log_dev);
	vfree(lc->disk_header);
	dm_io_client_destroy(lc->io_req.client);
	destroy_log_context(lc);
}
  415. static int count_bits32(uint32_t *addr, unsigned size)
  416. {
  417. int count = 0, i;
  418. for (i = 0; i < size; i++) {
  419. count += hweight32(*(addr+i));
  420. }
  421. return count;
  422. }
  423. static void fail_log_device(struct log_c *lc)
  424. {
  425. if (lc->log_dev_failed)
  426. return;
  427. lc->log_dev_failed = 1;
  428. dm_table_event(lc->ti->table);
  429. }
/*
 * Disk log resume: re-read the header, adjust the clean bitset to the
 * current region count, seed sync state from the clean bits, and write
 * a fresh header back.  Returns 0 or the I/O error.
 */
static int disk_resume(struct dirty_log *log)
{
	int r;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* read the disk header */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on mirror log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
		 * If the log device cannot be read, we must assume
		 * all regions are out-of-sync.  If we simply return
		 * here, the state will be uninitialized and could
		 * lead us to return 'in-sync' status for regions
		 * that are actually 'out-of-sync'.
		 */
		lc->header.nr_regions = 0;
	}

	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/*
	 * clear any old bits -- device has shrunk
	 * (only up to the next 32-bit word boundary; the bitset was
	 * sized with dm_round_up so that is also its end)
	 */
	for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
	lc->sync_search = 0;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	/* write the new header */
	r = write_header(lc);
	if (r) {
		DMWARN("%s: Failed to write header on mirror log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}

	return r;
}
/* Report the region size (in sectors) this log was built with. */
static uint32_t core_get_region_size(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	return lc->region_size;
}
/* Core log resume: restart the resync scan from region 0. */
static int core_resume(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	lc->sync_search = 0;
	return 0;
}
/* Return @region's clean bit (0 or 1). */
static int core_is_clean(struct dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;

	return log_test_bit(lc->clean_bits, region);
}
/* Return @region's sync bit (0 or 1); the @block argument is ignored. */
static int core_in_sync(struct dirty_log *log, region_t region, int block)
{
	struct log_c *lc = (struct log_c *) log->context;

	return log_test_bit(lc->sync_bits, region);
}
/* Core log has nothing persistent to flush. */
static int core_flush(struct dirty_log *log)
{
	/* no op */
	return 0;
}
  504. static int disk_flush(struct dirty_log *log)
  505. {
  506. int r;
  507. struct log_c *lc = (struct log_c *) log->context;
  508. /* only write if the log has changed */
  509. if (!lc->touched)
  510. return 0;
  511. r = write_header(lc);
  512. if (r)
  513. fail_log_device(lc);
  514. else
  515. lc->touched = 0;
  516. return r;
  517. }
/* Mark @region dirty (writes in flight) by clearing its clean bit. */
static void core_mark_region(struct dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->clean_bits, region);
}
/* Mark @region clean again by setting its clean bit. */
static void core_clear_region(struct dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_set_bit(lc, lc->clean_bits, region);
}
/*
 * Hand out the next not-in-sync region that isn't already being
 * recovered.  Returns 1 with *region set, or 0 when no work remains.
 */
static int core_get_resync_work(struct dirty_log *log, region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;

	/* sync_search remembers where the previous scan stopped */
	if (lc->sync_search >= lc->region_count)
		return 0;

	do {
		*region = ext2_find_next_zero_bit(
					     (unsigned long *) lc->sync_bits,
					     lc->region_count,
					     lc->sync_search);
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)
			return 0;

	} while (log_test_bit(lc->recovering_bits, *region));

	/* claim the region so it isn't handed out twice */
	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}
  545. static void core_set_region_sync(struct dirty_log *log, region_t region,
  546. int in_sync)
  547. {
  548. struct log_c *lc = (struct log_c *) log->context;
  549. log_clear_bit(lc, lc->recovering_bits, region);
  550. if (in_sync) {
  551. log_set_bit(lc, lc->sync_bits, region);
  552. lc->sync_count++;
  553. } else if (log_test_bit(lc->sync_bits, region)) {
  554. lc->sync_count--;
  555. log_clear_bit(lc, lc->sync_bits, region);
  556. }
  557. }
/* Number of regions currently counted as in-sync. */
static region_t core_get_sync_count(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	return lc->sync_count;
}
  563. #define DMEMIT_SYNC \
  564. if (lc->sync != DEFAULTSYNC) \
  565. DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
/*
 * Report core log status.  INFO: "1 core".  TABLE: the constructor
 * arguments ("core <argc> <region_size> [[no]sync]").  Returns the
 * number of bytes written into result[].
 */
static int core_status(struct dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;	/* DMEMIT appends to result[] and advances sz */
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("1 %s", log->type->name);
		break;

	case STATUSTYPE_TABLE:
		/* argc is 1, or 2 when an explicit sync flag was given */
		DMEMIT("%s %u %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
/*
 * Report disk log status.  INFO includes the log device name and a
 * health character ('D'ead / 'A'live).  TABLE emits the constructor
 * arguments.  Returns the number of bytes written into result[].
 */
static int disk_status(struct dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;	/* DMEMIT appends to result[] and advances sz */
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
		       lc->log_dev_failed ? 'D' : 'A');
		break;

	case STATUSTYPE_TABLE:
		/* argc is 2, or 3 when an explicit sync flag was given */
		DMEMIT("%s %u %s %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
		       lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
/* The "core" log type: bitsets kept in memory only. */
static struct dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
	.dtr = core_dtr,
	.resume = core_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = core_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = core_status,
};
/*
 * The "disk" log type: persists header + clean bitset via dm-io,
 * reusing the core implementation for the in-memory bookkeeping.
 */
static struct dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
	.dtr = disk_dtr,
	.postsuspend = disk_flush,	/* write out dirty state on suspend */
	.resume = disk_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = disk_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = disk_status,
};
  635. int __init dm_dirty_log_init(void)
  636. {
  637. int r;
  638. r = dm_register_dirty_log_type(&_core_type);
  639. if (r)
  640. DMWARN("couldn't register core log");
  641. r = dm_register_dirty_log_type(&_disk_type);
  642. if (r) {
  643. DMWARN("couldn't register disk type");
  644. dm_unregister_dirty_log_type(&_core_type);
  645. }
  646. return r;
  647. }
/* Module teardown: remove the built-in log types from the registry. */
void dm_dirty_log_exit(void)
{
	dm_unregister_dirty_log_type(&_disk_type);
	dm_unregister_dirty_log_type(&_core_type);
}
/* Exported dirty-log API for mirror log consumers. */
EXPORT_SYMBOL(dm_register_dirty_log_type);
EXPORT_SYMBOL(dm_unregister_dirty_log_type);
EXPORT_SYMBOL(dm_create_dirty_log);
EXPORT_SYMBOL(dm_destroy_dirty_log);