dm-log.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789
  1. /*
  2. * Copyright (C) 2003 Sistina Software
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the LGPL.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/module.h>
  10. #include <linux/vmalloc.h>
  11. #include "dm-log.h"
  12. #include "dm-io.h"
#define DM_MSG_PREFIX "dirty region log"

/* All registered dirty-log types; additions/removals guarded by _lock. */
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
  16. int dm_register_dirty_log_type(struct dirty_log_type *type)
  17. {
  18. spin_lock(&_lock);
  19. type->use_count = 0;
  20. list_add(&type->list, &_log_types);
  21. spin_unlock(&_lock);
  22. return 0;
  23. }
  24. int dm_unregister_dirty_log_type(struct dirty_log_type *type)
  25. {
  26. spin_lock(&_lock);
  27. if (type->use_count)
  28. DMWARN("Attempt to unregister a log type that is still in use");
  29. else
  30. list_del(&type->list);
  31. spin_unlock(&_lock);
  32. return 0;
  33. }
/*
 * Look up a registered log type by name and take a reference on it.
 *
 * The first user of a type also pins the owning module with
 * try_module_get(); later users only bump use_count.  Returns the type
 * with use_count incremented, or NULL if the name is unknown or the
 * module is being unloaded.
 */
static struct dirty_log_type *_get_type(const char *type_name)
{
	struct dirty_log_type *type;

	spin_lock(&_lock);
	list_for_each_entry (type, &_log_types, list)
		if (!strcmp(type_name, type->name)) {
			/* use_count and module pinning must change together
			 * under _lock, hence the early unlock on failure. */
			if (!type->use_count && !try_module_get(type->module)) {
				spin_unlock(&_lock);
				return NULL;
			}
			type->use_count++;
			spin_unlock(&_lock);
			return type;
		}

	spin_unlock(&_lock);
	return NULL;
}
  51. /*
  52. * get_type
  53. * @type_name
  54. *
  55. * Attempt to retrieve the dirty_log_type by name. If not already
  56. * available, attempt to load the appropriate module.
  57. *
  58. * Log modules are named "dm-log-" followed by the 'type_name'.
  59. * Modules may contain multiple types.
  60. * This function will first try the module "dm-log-<type_name>",
  61. * then truncate 'type_name' on the last '-' and try again.
  62. *
  63. * For example, if type_name was "clustered-disk", it would search
  64. * 'dm-log-clustered-disk' then 'dm-log-clustered'.
  65. *
  66. * Returns: dirty_log_type* on success, NULL on failure
  67. */
/*
 * Look up a log type, loading modules on demand.  Tries the module
 * "dm-log-<type_name>" first, then retries with the name truncated at
 * its last '-' until a match is found or no '-' remains (e.g.
 * "clustered-disk" -> dm-log-clustered-disk, then dm-log-clustered).
 *
 * Returns a referenced dirty_log_type, or NULL.
 */
static struct dirty_log_type *get_type(const char *type_name)
{
	char *p, *type_name_dup;
	struct dirty_log_type *type;

	type = _get_type(type_name);
	if (type)
		return type;

	/* Work on a copy: the loop truncates the name in place. */
	type_name_dup = kstrdup(type_name, GFP_KERNEL);
	if (!type_name_dup) {
		DMWARN("No memory left to attempt log module load for \"%s\"",
		       type_name);
		return NULL;
	}

	/* Note: _get_type() is always called with the FULL name; only the
	 * module name being requested is truncated. */
	while (request_module("dm-log-%s", type_name_dup) ||
	       !(type = _get_type(type_name))) {
		p = strrchr(type_name_dup, '-');
		if (!p)
			break;
		p[0] = '\0';	/* drop the trailing "-suffix" and retry */
	}

	if (!type)
		DMWARN("Module for logging type \"%s\" not found.", type_name);

	kfree(type_name_dup);

	return type;
}
  93. static void put_type(struct dirty_log_type *type)
  94. {
  95. spin_lock(&_lock);
  96. if (!--type->use_count)
  97. module_put(type->module);
  98. spin_unlock(&_lock);
  99. }
  100. struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
  101. unsigned int argc, char **argv)
  102. {
  103. struct dirty_log_type *type;
  104. struct dirty_log *log;
  105. log = kmalloc(sizeof(*log), GFP_KERNEL);
  106. if (!log)
  107. return NULL;
  108. type = get_type(type_name);
  109. if (!type) {
  110. kfree(log);
  111. return NULL;
  112. }
  113. log->type = type;
  114. if (type->ctr(log, ti, argc, argv)) {
  115. kfree(log);
  116. put_type(type);
  117. return NULL;
  118. }
  119. return log;
  120. }
/*
 * Tear down a log made by dm_create_dirty_log(): run the type's
 * destructor, drop the type/module reference, then free the handle.
 */
void dm_destroy_dirty_log(struct dirty_log *log)
{
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
}
  127. /*-----------------------------------------------------------------
  128. * Persistent and core logs share a lot of their implementation.
  129. * FIXME: need a reload method to be called from a resume
  130. *---------------------------------------------------------------*/
  131. /*
  132. * Magic for persistent mirrors: "MiRr"
  133. */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2	/* bitset starts this many sectors into the buffer */

/* Header stored at the start of the log device (little-endian on disk,
 * see header_to_disk()/header_from_disk()). */
struct log_header {
	uint32_t magic;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;
	sector_t nr_regions;	/* region count when the header was written */
};
/* Per-instance state shared by the core and disk log implementations. */
struct log_c {
	struct dm_target *ti;
	int touched;			/* a bitset changed since last flush */
	uint32_t region_size;		/* in sectors (ti->len is divided by it) */
	unsigned int region_count;
	region_t sync_count;		/* number of regions marked in-sync */

	unsigned bitset_uint32_count;	/* 32-bit words per bitset */
	uint32_t *clean_bits;		/* bit set => no writes pending */
	uint32_t *sync_bits;		/* bit set => region in sync */
	uint32_t *recovering_bits;	/* FIXME: this seems excessive */

	int sync_search;		/* where the next resync scan resumes */

	/* Resync flag */
	enum sync {
		DEFAULTSYNC,	/* Synchronize if necessary */
		NOSYNC,		/* Devices known to be already in sync */
		FORCESYNC,	/* Force a sync to happen */
	} sync;

	struct dm_io_request io_req;

	/*
	 * Disk log fields (unused by the core log)
	 */
	int log_dev_failed;
	struct dm_dev *log_dev;
	struct log_header header;
	struct io_region header_location;
	/* For a disk log, clean_bits points INTO this buffer at offset
	 * LOG_OFFSET sectors; header and bitset share one allocation. */
	struct log_header *disk_header;
};
  176. /*
  177. * The touched member needs to be updated every time we access
  178. * one of the bitsets.
  179. */
  180. static inline int log_test_bit(uint32_t *bs, unsigned bit)
  181. {
  182. return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
  183. }
/* Set a bit and record that the log needs writing back. */
static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned bit)
{
	ext2_set_bit(bit, (unsigned long *) bs);
	l->touched = 1;	/* disk_flush() only writes when touched */
}
/* Clear a bit and record that the log needs writing back. */
static inline void log_clear_bit(struct log_c *l,
				 uint32_t *bs, unsigned bit)
{
	ext2_clear_bit(bit, (unsigned long *) bs);
	l->touched = 1;	/* disk_flush() only writes when touched */
}
  196. /*----------------------------------------------------------------
  197. * Header IO
  198. *--------------------------------------------------------------*/
/* Convert the in-core header to its little-endian on-disk layout. */
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
	disk->magic = cpu_to_le32(core->magic);
	disk->version = cpu_to_le32(core->version);
	disk->nr_regions = cpu_to_le64(core->nr_regions);
}
/* Convert the little-endian on-disk header to host byte order. */
static void header_from_disk(struct log_header *core, struct log_header *disk)
{
	core->magic = le32_to_cpu(disk->magic);
	core->version = le32_to_cpu(disk->version);
	core->nr_regions = le64_to_cpu(disk->nr_regions);
}
/*
 * Read or write the header area of the log device (@rw is READ/WRITE).
 * The NULL notify fn makes the dm_io call synchronous.
 */
static int rw_header(struct log_c *lc, int rw)
{
	lc->io_req.bi_rw = rw;
	lc->io_req.mem.ptr.vma = lc->disk_header;
	lc->io_req.notify.fn = NULL;

	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
/*
 * Read and validate the on-disk header into log->header.
 *
 * When the user forced a sync policy, or the magic doesn't match, the
 * header is re-initialised (nr_regions = 0) so disk_resume() treats
 * every region according to the sync policy.
 *
 * Returns 0, an I/O error from rw_header(), or -EINVAL for an
 * incompatible version.
 */
static int read_header(struct log_c *log)
{
	int r;

	r = rw_header(log, READ);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

#ifdef __LITTLE_ENDIAN
	/*
	 * NOTE(review): presumably v1 headers were written in host byte
	 * order, so on a little-endian host they are bit-compatible
	 * with the v2 (little-endian) layout and can just be relabelled
	 * — confirm against the version history.
	 */
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
/* Serialise the in-core header and write it to the log device. */
static inline int write_header(struct log_c *log)
{
	header_to_disk(&log->header, log->disk_header);
	return rw_header(log, WRITE);
}
  246. /*----------------------------------------------------------------
  247. * core log constructor/destructor
  248. *
  249. * argv contains region_size followed optionally by [no]sync
  250. *--------------------------------------------------------------*/
  251. #define BYTE_SHIFT 3
  252. static int create_log_context(struct dirty_log *log, struct dm_target *ti,
  253. unsigned int argc, char **argv,
  254. struct dm_dev *dev)
  255. {
  256. enum sync sync = DEFAULTSYNC;
  257. struct log_c *lc;
  258. uint32_t region_size;
  259. unsigned int region_count;
  260. size_t bitset_size, buf_size;
  261. int r;
  262. if (argc < 1 || argc > 2) {
  263. DMWARN("wrong number of arguments to dirty region log");
  264. return -EINVAL;
  265. }
  266. if (argc > 1) {
  267. if (!strcmp(argv[1], "sync"))
  268. sync = FORCESYNC;
  269. else if (!strcmp(argv[1], "nosync"))
  270. sync = NOSYNC;
  271. else {
  272. DMWARN("unrecognised sync argument to "
  273. "dirty region log: %s", argv[1]);
  274. return -EINVAL;
  275. }
  276. }
  277. if (sscanf(argv[0], "%u", &region_size) != 1) {
  278. DMWARN("invalid region size string");
  279. return -EINVAL;
  280. }
  281. region_count = dm_sector_div_up(ti->len, region_size);
  282. lc = kmalloc(sizeof(*lc), GFP_KERNEL);
  283. if (!lc) {
  284. DMWARN("couldn't allocate core log");
  285. return -ENOMEM;
  286. }
  287. lc->ti = ti;
  288. lc->touched = 0;
  289. lc->region_size = region_size;
  290. lc->region_count = region_count;
  291. lc->sync = sync;
  292. /*
  293. * Work out how many "unsigned long"s we need to hold the bitset.
  294. */
  295. bitset_size = dm_round_up(region_count,
  296. sizeof(*lc->clean_bits) << BYTE_SHIFT);
  297. bitset_size >>= BYTE_SHIFT;
  298. lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
  299. /*
  300. * Disk log?
  301. */
  302. if (!dev) {
  303. lc->clean_bits = vmalloc(bitset_size);
  304. if (!lc->clean_bits) {
  305. DMWARN("couldn't allocate clean bitset");
  306. kfree(lc);
  307. return -ENOMEM;
  308. }
  309. lc->disk_header = NULL;
  310. } else {
  311. lc->log_dev = dev;
  312. lc->log_dev_failed = 0;
  313. lc->header_location.bdev = lc->log_dev->bdev;
  314. lc->header_location.sector = 0;
  315. /*
  316. * Buffer holds both header and bitset.
  317. */
  318. buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
  319. bitset_size, ti->limits.hardsect_size);
  320. lc->header_location.count = buf_size >> SECTOR_SHIFT;
  321. lc->io_req.mem.type = DM_IO_VMA;
  322. lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
  323. PAGE_SIZE));
  324. if (IS_ERR(lc->io_req.client)) {
  325. r = PTR_ERR(lc->io_req.client);
  326. DMWARN("couldn't allocate disk io client");
  327. kfree(lc);
  328. return -ENOMEM;
  329. }
  330. lc->disk_header = vmalloc(buf_size);
  331. if (!lc->disk_header) {
  332. DMWARN("couldn't allocate disk log buffer");
  333. kfree(lc);
  334. return -ENOMEM;
  335. }
  336. lc->clean_bits = (void *)lc->disk_header +
  337. (LOG_OFFSET << SECTOR_SHIFT);
  338. }
  339. memset(lc->clean_bits, -1, bitset_size);
  340. lc->sync_bits = vmalloc(bitset_size);
  341. if (!lc->sync_bits) {
  342. DMWARN("couldn't allocate sync bitset");
  343. if (!dev)
  344. vfree(lc->clean_bits);
  345. vfree(lc->disk_header);
  346. kfree(lc);
  347. return -ENOMEM;
  348. }
  349. memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
  350. lc->sync_count = (sync == NOSYNC) ? region_count : 0;
  351. lc->recovering_bits = vmalloc(bitset_size);
  352. if (!lc->recovering_bits) {
  353. DMWARN("couldn't allocate sync bitset");
  354. vfree(lc->sync_bits);
  355. if (!dev)
  356. vfree(lc->clean_bits);
  357. vfree(lc->disk_header);
  358. kfree(lc);
  359. return -ENOMEM;
  360. }
  361. memset(lc->recovering_bits, 0, bitset_size);
  362. lc->sync_search = 0;
  363. log->context = lc;
  364. return 0;
  365. }
/*
 * Core (in-memory) log constructor: argv is <region_size> [[no]sync].
 * dev == NULL selects the core-only allocation path.
 */
static int core_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	return create_log_context(log, ti, argc, argv, NULL);
}
/*
 * Free the state shared by core and disk logs.  clean_bits is NOT
 * freed here: a core log owns it separately (core_dtr frees it), a
 * disk log embeds it in disk_header (disk_dtr frees that).
 */
static void destroy_log_context(struct log_c *lc)
{
	vfree(lc->sync_bits);
	vfree(lc->recovering_bits);
	kfree(lc);
}
static void core_dtr(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	/* A core log owns a standalone clean bitset. */
	vfree(lc->clean_bits);
	destroy_log_context(lc);
}
  383. /*----------------------------------------------------------------
  384. * disk log constructor/destructor
  385. *
  386. * argv contains log_device region_size followed optionally by [no]sync
  387. *--------------------------------------------------------------*/
/*
 * Disk log constructor: argv is <log_device> <region_size> [[no]sync].
 * Acquires the log device, then delegates to create_log_context() with
 * the remaining arguments; the device is put again on failure.
 */
static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc < 2 || argc > 3) {
		DMWARN("wrong number of arguments to disk dirty region log");
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
			  FMODE_READ | FMODE_WRITE, &dev);
	if (r)
		return r;

	r = create_log_context(log, ti, argc - 1, argv + 1, dev);
	if (r) {
		dm_put_device(ti, dev);
		return r;
	}

	return 0;
}
static void disk_dtr(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	dm_put_device(lc->ti, lc->log_dev);
	vfree(lc->disk_header);	/* also frees the embedded clean_bits */
	dm_io_client_destroy(lc->io_req.client);
	destroy_log_context(lc);
}
  416. static int count_bits32(uint32_t *addr, unsigned size)
  417. {
  418. int count = 0, i;
  419. for (i = 0; i < size; i++) {
  420. count += hweight32(*(addr+i));
  421. }
  422. return count;
  423. }
/*
 * Mark the log device failed (at most once) and raise a table event so
 * userspace is notified.  Idempotent.
 */
static void fail_log_device(struct log_c *lc)
{
	if (lc->log_dev_failed)
		return;

	lc->log_dev_failed = 1;
	dm_table_event(lc->ti->table);
}
/*
 * Resume a disk log: re-read the header, adjust the clean bitset for
 * any growth/shrink of the target since the header was written, seed
 * the sync state from the clean bits, then write the header back out.
 */
static int disk_resume(struct dirty_log *log)
{
	int r;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* read the disk header */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
		 * If the log device cannot be read, we must assume
		 * all regions are out-of-sync. If we simply return
		 * here, the state will be uninitialized and could
		 * lead us to return 'in-sync' status for regions
		 * that are actually 'out-of-sync'.
		 */
		lc->header.nr_regions = 0;
	}

	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/* clear any old bits -- device has shrunk */
	/* (only up to the next 32-bit word boundary; the words beyond
	 * region_count were zeroed or never set) */
	for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
	lc->sync_search = 0;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	/* write the new header */
	r = write_header(lc);
	if (r) {
		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}

	return r;
}
  479. static uint32_t core_get_region_size(struct dirty_log *log)
  480. {
  481. struct log_c *lc = (struct log_c *) log->context;
  482. return lc->region_size;
  483. }
  484. static int core_resume(struct dirty_log *log)
  485. {
  486. struct log_c *lc = (struct log_c *) log->context;
  487. lc->sync_search = 0;
  488. return 0;
  489. }
  490. static int core_is_clean(struct dirty_log *log, region_t region)
  491. {
  492. struct log_c *lc = (struct log_c *) log->context;
  493. return log_test_bit(lc->clean_bits, region);
  494. }
  495. static int core_in_sync(struct dirty_log *log, region_t region, int block)
  496. {
  497. struct log_c *lc = (struct log_c *) log->context;
  498. return log_test_bit(lc->sync_bits, region);
  499. }
  500. static int core_flush(struct dirty_log *log)
  501. {
  502. /* no op */
  503. return 0;
  504. }
/*
 * Write the header and (embedded) clean bitset to disk, but only if
 * something changed since the last flush.  On I/O failure the log
 * device is marked failed and the error is returned.
 */
static int disk_flush(struct dirty_log *log)
{
	int r;
	struct log_c *lc = (struct log_c *) log->context;

	/* only write if the log has changed */
	if (!lc->touched)
		return 0;

	r = write_header(lc);
	if (r)
		fail_log_device(lc);
	else
		lc->touched = 0;	/* keep touched set on failure for retry */

	return r;
}
  519. static void core_mark_region(struct dirty_log *log, region_t region)
  520. {
  521. struct log_c *lc = (struct log_c *) log->context;
  522. log_clear_bit(lc, lc->clean_bits, region);
  523. }
  524. static void core_clear_region(struct dirty_log *log, region_t region)
  525. {
  526. struct log_c *lc = (struct log_c *) log->context;
  527. log_set_bit(lc, lc->clean_bits, region);
  528. }
/*
 * Hand out the next region that needs resynchronising.
 *
 * Scans sync_bits for a zero bit starting at sync_search, skipping
 * regions already being recovered, and marks the chosen region as
 * recovering.  Returns 1 with *region set, or 0 when no work remains.
 */
static int core_get_resync_work(struct dirty_log *log, region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;

	if (lc->sync_search >= lc->region_count)
		return 0;

	do {
		*region = ext2_find_next_zero_bit(
					     (unsigned long *) lc->sync_bits,
					     lc->region_count,
					     lc->sync_search);
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)
			return 0;

	} while (log_test_bit(lc->recovering_bits, *region));

	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}
/*
 * Recovery of @region finished: drop its recovering bit and update
 * sync_bits/sync_count to reflect the outcome.
 */
static void core_set_region_sync(struct dirty_log *log, region_t region,
				 int in_sync)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->recovering_bits, region);
	if (in_sync) {
		log_set_bit(lc, lc->sync_bits, region);
		lc->sync_count++;
	} else if (log_test_bit(lc->sync_bits, region)) {
		/* only decrement if the region really was counted in-sync */
		lc->sync_count--;
		log_clear_bit(lc, lc->sync_bits, region);
	}
}
  559. static region_t core_get_sync_count(struct dirty_log *log)
  560. {
  561. struct log_c *lc = (struct log_c *) log->context;
  562. return lc->sync_count;
  563. }
  564. #define DMEMIT_SYNC \
  565. if (lc->sync != DEFAULTSYNC) \
  566. DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
/*
 * Status for the core log.
 *   INFO:  "1 <type>"
 *   TABLE: "<type> <argc> <region_size> [[no]sync]"
 * Returns the number of bytes emitted into @result.
 */
static int core_status(struct dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;	/* DMEMIT accumulates into sz/result/maxlen */
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("1 %s", log->type->name);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %u %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
/*
 * Status for the disk log.
 *   INFO:  "3 <type> <log_dev> <A|D>"  (A = alive, D = device failed)
 *   TABLE: "<type> <argc> <log_dev> <region_size> [[no]sync]"
 * Returns the number of bytes emitted into @result.
 */
static int disk_status(struct dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;	/* DMEMIT accumulates into sz/result/maxlen */
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
		       lc->log_dev_failed ? 'D' : 'A');
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %u %s %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
		       lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
/* In-memory-only log: dirty state does not survive a target reload. */
static struct dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
	.dtr = core_dtr,
	.resume = core_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = core_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = core_status,
};
/*
 * Persistent log: shares the core bitmap operations but adds header
 * I/O.  postsuspend also flushes so the on-disk state is current
 * before the device is suspended.
 */
static struct dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
	.dtr = disk_dtr,
	.postsuspend = disk_flush,
	.resume = disk_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = disk_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = disk_status,
};
  636. int __init dm_dirty_log_init(void)
  637. {
  638. int r;
  639. r = dm_register_dirty_log_type(&_core_type);
  640. if (r)
  641. DMWARN("couldn't register core log");
  642. r = dm_register_dirty_log_type(&_disk_type);
  643. if (r) {
  644. DMWARN("couldn't register disk type");
  645. dm_unregister_dirty_log_type(&_core_type);
  646. }
  647. return r;
  648. }
/* Module teardown: unregister in reverse order of registration. */
void dm_dirty_log_exit(void)
{
	dm_unregister_dirty_log_type(&_disk_type);
	dm_unregister_dirty_log_type(&_core_type);
}
/* Entry points used by mirror targets and out-of-tree log modules. */
EXPORT_SYMBOL(dm_register_dirty_log_type);
EXPORT_SYMBOL(dm_unregister_dirty_log_type);
EXPORT_SYMBOL(dm_create_dirty_log);
EXPORT_SYMBOL(dm_destroy_dirty_log);