dm-log.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797
  1. /*
  2. * Copyright (C) 2003 Sistina Software
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the LGPL.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/module.h>
  10. #include <linux/vmalloc.h>
  11. #include "dm-log.h"
  12. #include "dm-io.h"
  13. #include "dm.h"
  14. #define DM_MSG_PREFIX "dirty region log"
  15. static LIST_HEAD(_log_types);
  16. static DEFINE_SPINLOCK(_lock);
  17. int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
  18. {
  19. spin_lock(&_lock);
  20. type->use_count = 0;
  21. list_add(&type->list, &_log_types);
  22. spin_unlock(&_lock);
  23. return 0;
  24. }
  25. EXPORT_SYMBOL(dm_dirty_log_type_register);
  26. int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
  27. {
  28. spin_lock(&_lock);
  29. if (type->use_count)
  30. DMWARN("Attempt to unregister a log type that is still in use");
  31. else
  32. list_del(&type->list);
  33. spin_unlock(&_lock);
  34. return 0;
  35. }
  36. EXPORT_SYMBOL(dm_dirty_log_type_unregister);
  37. static struct dm_dirty_log_type *_get_type(const char *type_name)
  38. {
  39. struct dm_dirty_log_type *type;
  40. spin_lock(&_lock);
  41. list_for_each_entry (type, &_log_types, list)
  42. if (!strcmp(type_name, type->name)) {
  43. if (!type->use_count && !try_module_get(type->module)){
  44. spin_unlock(&_lock);
  45. return NULL;
  46. }
  47. type->use_count++;
  48. spin_unlock(&_lock);
  49. return type;
  50. }
  51. spin_unlock(&_lock);
  52. return NULL;
  53. }
  54. /*
  55. * get_type
  56. * @type_name
  57. *
  58. * Attempt to retrieve the dirty_log_type by name. If not already
  59. * available, attempt to load the appropriate module.
  60. *
  61. * Log modules are named "dm-log-" followed by the 'type_name'.
  62. * Modules may contain multiple types.
  63. * This function will first try the module "dm-log-<type_name>",
  64. * then truncate 'type_name' on the last '-' and try again.
  65. *
  66. * For example, if type_name was "clustered-disk", it would search
  67. * 'dm-log-clustered-disk' then 'dm-log-clustered'.
  68. *
  69. * Returns: dirty_log_type* on success, NULL on failure
  70. */
  71. static struct dm_dirty_log_type *get_type(const char *type_name)
  72. {
  73. char *p, *type_name_dup;
  74. struct dm_dirty_log_type *type;
  75. type = _get_type(type_name);
  76. if (type)
  77. return type;
  78. type_name_dup = kstrdup(type_name, GFP_KERNEL);
  79. if (!type_name_dup) {
  80. DMWARN("No memory left to attempt log module load for \"%s\"",
  81. type_name);
  82. return NULL;
  83. }
  84. while (request_module("dm-log-%s", type_name_dup) ||
  85. !(type = _get_type(type_name))) {
  86. p = strrchr(type_name_dup, '-');
  87. if (!p)
  88. break;
  89. p[0] = '\0';
  90. }
  91. if (!type)
  92. DMWARN("Module for logging type \"%s\" not found.", type_name);
  93. kfree(type_name_dup);
  94. return type;
  95. }
  96. static void put_type(struct dm_dirty_log_type *type)
  97. {
  98. spin_lock(&_lock);
  99. if (!--type->use_count)
  100. module_put(type->module);
  101. spin_unlock(&_lock);
  102. }
  103. struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
  104. struct dm_target *ti,
  105. unsigned int argc, char **argv)
  106. {
  107. struct dm_dirty_log_type *type;
  108. struct dm_dirty_log *log;
  109. log = kmalloc(sizeof(*log), GFP_KERNEL);
  110. if (!log)
  111. return NULL;
  112. type = get_type(type_name);
  113. if (!type) {
  114. kfree(log);
  115. return NULL;
  116. }
  117. log->type = type;
  118. if (type->ctr(log, ti, argc, argv)) {
  119. kfree(log);
  120. put_type(type);
  121. return NULL;
  122. }
  123. return log;
  124. }
  125. EXPORT_SYMBOL(dm_dirty_log_create);
/*
 * Tear down a log created by dm_dirty_log_create().  Order matters:
 * the type's destructor must run before the type (and its module)
 * reference is dropped, and the log struct is freed last.
 */
void dm_dirty_log_destroy(struct dm_dirty_log *log)
{
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
  133. /*-----------------------------------------------------------------
  134. * Persistent and core logs share a lot of their implementation.
  135. * FIXME: need a reload method to be called from a resume
  136. *---------------------------------------------------------------*/
  137. /*
  138. * Magic for persistent mirrors: "MiRr"
  139. */
  140. #define MIRROR_MAGIC 0x4D695272
  141. /*
  142. * The on-disk version of the metadata.
  143. */
  144. #define MIRROR_DISK_VERSION 2
  145. #define LOG_OFFSET 2
/*
 * Log header.  The same struct describes both the in-core and on-disk
 * forms; header_to_disk()/header_from_disk() convert the endianness.
 */
struct log_header {
	uint32_t magic;		/* MIRROR_MAGIC */

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;
	sector_t nr_regions;	/* region count when the header was last written */
};
/*
 * Per-log private context, shared by the "core" and "disk" types.
 * For disk logs, clean_bits points into the disk_header buffer so the
 * header and bitset are written to the log device in one dm_io.
 */
struct log_c {
	struct dm_target *ti;

	/* Set whenever a bitset is modified; cleared after a flush. */
	int touched;

	uint32_t region_size;		/* sectors per region */
	unsigned int region_count;	/* regions covering ti->len */
	region_t sync_count;		/* number of in-sync regions */

	unsigned bitset_uint32_count;	/* uint32_t words per bitset */
	uint32_t *clean_bits;
	uint32_t *sync_bits;
	uint32_t *recovering_bits;	/* FIXME: this seems excessive */

	int sync_search;		/* resync scan position (region number) */

	/* Resync flag */
	enum sync {
		DEFAULTSYNC,	/* Synchronize if necessary */
		NOSYNC,		/* Devices known to be already in sync */
		FORCESYNC,	/* Force a sync to happen */
	} sync;

	struct dm_io_request io_req;

	/*
	 * Disk log fields
	 */
	int log_dev_failed;		/* header I/O failed; event raised */
	struct dm_dev *log_dev;
	struct log_header header;
	struct dm_io_region header_location;
	struct log_header *disk_header;	/* I/O buffer: header + clean bitset */
};
  182. /*
  183. * The touched member needs to be updated every time we access
  184. * one of the bitsets.
  185. */
  186. static inline int log_test_bit(uint32_t *bs, unsigned bit)
  187. {
  188. return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
  189. }
/* Set @bit in bitset @bs and mark the log dirty for the next flush. */
static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned bit)
{
	ext2_set_bit(bit, (unsigned long *) bs);
	l->touched = 1;
}
/* Clear @bit in bitset @bs and mark the log dirty for the next flush. */
static inline void log_clear_bit(struct log_c *l,
				 uint32_t *bs, unsigned bit)
{
	ext2_clear_bit(bit, (unsigned long *) bs);
	l->touched = 1;
}
  202. /*----------------------------------------------------------------
  203. * Header IO
  204. *--------------------------------------------------------------*/
/* Convert an in-core header to its little-endian on-disk layout. */
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
	disk->magic = cpu_to_le32(core->magic);
	disk->version = cpu_to_le32(core->version);
	disk->nr_regions = cpu_to_le64(core->nr_regions);
}
/* Convert a little-endian on-disk header to its in-core form. */
static void header_from_disk(struct log_header *core, struct log_header *disk)
{
	core->magic = le32_to_cpu(disk->magic);
	core->version = le32_to_cpu(disk->version);
	core->nr_regions = le64_to_cpu(disk->nr_regions);
}
/*
 * Issue a READ or WRITE of the header buffer (header plus clean
 * bitset) against the log device.  No notify callback is set, so the
 * dm_io call completes synchronously.
 */
static int rw_header(struct log_c *lc, int rw)
{
	lc->io_req.bi_rw = rw;
	lc->io_req.mem.ptr.vma = lc->disk_header;
	lc->io_req.notify.fn = NULL;
	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
/*
 * Read the on-disk header into log->header.  The header is
 * re-initialised (forcing a full log rebuild on resume) when a resync
 * was explicitly requested or the magic does not match.  Returns
 * -EINVAL for an incompatible on-disk version.
 */
static int read_header(struct log_c *log)
{
	int r;
	r = rw_header(log, READ);
	if (r)
		return r;
	header_from_disk(&log->header, log->disk_header);
	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}
#ifdef __LITTLE_ENDIAN
	/*
	 * Silently upgrade v1 headers on little-endian hosts only —
	 * presumably the v1 layout was only valid there, so big-endian
	 * v1 logs fall through to the rejection below.  NOTE(review):
	 * historical quirk; confirm against the v1 format if touched.
	 */
	if (log->header.version == 1)
		log->header.version = 2;
#endif
	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}
	return 0;
}
/* Serialise the in-core header and write it to the log device. */
static inline int write_header(struct log_c *log)
{
	header_to_disk(&log->header, log->disk_header);
	return rw_header(log, WRITE);
}
  252. /*----------------------------------------------------------------
  253. * core log constructor/destructor
  254. *
  255. * argv contains region_size followed optionally by [no]sync
  256. *--------------------------------------------------------------*/
  257. #define BYTE_SHIFT 3
  258. static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
  259. unsigned int argc, char **argv,
  260. struct dm_dev *dev)
  261. {
  262. enum sync sync = DEFAULTSYNC;
  263. struct log_c *lc;
  264. uint32_t region_size;
  265. unsigned int region_count;
  266. size_t bitset_size, buf_size;
  267. int r;
  268. if (argc < 1 || argc > 2) {
  269. DMWARN("wrong number of arguments to dirty region log");
  270. return -EINVAL;
  271. }
  272. if (argc > 1) {
  273. if (!strcmp(argv[1], "sync"))
  274. sync = FORCESYNC;
  275. else if (!strcmp(argv[1], "nosync"))
  276. sync = NOSYNC;
  277. else {
  278. DMWARN("unrecognised sync argument to "
  279. "dirty region log: %s", argv[1]);
  280. return -EINVAL;
  281. }
  282. }
  283. if (sscanf(argv[0], "%u", &region_size) != 1) {
  284. DMWARN("invalid region size string");
  285. return -EINVAL;
  286. }
  287. region_count = dm_sector_div_up(ti->len, region_size);
  288. lc = kmalloc(sizeof(*lc), GFP_KERNEL);
  289. if (!lc) {
  290. DMWARN("couldn't allocate core log");
  291. return -ENOMEM;
  292. }
  293. lc->ti = ti;
  294. lc->touched = 0;
  295. lc->region_size = region_size;
  296. lc->region_count = region_count;
  297. lc->sync = sync;
  298. /*
  299. * Work out how many "unsigned long"s we need to hold the bitset.
  300. */
  301. bitset_size = dm_round_up(region_count,
  302. sizeof(*lc->clean_bits) << BYTE_SHIFT);
  303. bitset_size >>= BYTE_SHIFT;
  304. lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
  305. /*
  306. * Disk log?
  307. */
  308. if (!dev) {
  309. lc->clean_bits = vmalloc(bitset_size);
  310. if (!lc->clean_bits) {
  311. DMWARN("couldn't allocate clean bitset");
  312. kfree(lc);
  313. return -ENOMEM;
  314. }
  315. lc->disk_header = NULL;
  316. } else {
  317. lc->log_dev = dev;
  318. lc->log_dev_failed = 0;
  319. lc->header_location.bdev = lc->log_dev->bdev;
  320. lc->header_location.sector = 0;
  321. /*
  322. * Buffer holds both header and bitset.
  323. */
  324. buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
  325. bitset_size, ti->limits.hardsect_size);
  326. lc->header_location.count = buf_size >> SECTOR_SHIFT;
  327. lc->io_req.mem.type = DM_IO_VMA;
  328. lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
  329. PAGE_SIZE));
  330. if (IS_ERR(lc->io_req.client)) {
  331. r = PTR_ERR(lc->io_req.client);
  332. DMWARN("couldn't allocate disk io client");
  333. kfree(lc);
  334. return -ENOMEM;
  335. }
  336. lc->disk_header = vmalloc(buf_size);
  337. if (!lc->disk_header) {
  338. DMWARN("couldn't allocate disk log buffer");
  339. kfree(lc);
  340. return -ENOMEM;
  341. }
  342. lc->clean_bits = (void *)lc->disk_header +
  343. (LOG_OFFSET << SECTOR_SHIFT);
  344. }
  345. memset(lc->clean_bits, -1, bitset_size);
  346. lc->sync_bits = vmalloc(bitset_size);
  347. if (!lc->sync_bits) {
  348. DMWARN("couldn't allocate sync bitset");
  349. if (!dev)
  350. vfree(lc->clean_bits);
  351. vfree(lc->disk_header);
  352. kfree(lc);
  353. return -ENOMEM;
  354. }
  355. memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
  356. lc->sync_count = (sync == NOSYNC) ? region_count : 0;
  357. lc->recovering_bits = vmalloc(bitset_size);
  358. if (!lc->recovering_bits) {
  359. DMWARN("couldn't allocate sync bitset");
  360. vfree(lc->sync_bits);
  361. if (!dev)
  362. vfree(lc->clean_bits);
  363. vfree(lc->disk_header);
  364. kfree(lc);
  365. return -ENOMEM;
  366. }
  367. memset(lc->recovering_bits, 0, bitset_size);
  368. lc->sync_search = 0;
  369. log->context = lc;
  370. return 0;
  371. }
/* Construct an in-core (non-persistent) log: no backing device. */
static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	return create_log_context(log, ti, argc, argv, NULL);
}
/*
 * Free the context parts common to core and disk logs.  clean_bits is
 * released by the callers because its allocation differs: a separate
 * vmalloc for core logs, part of disk_header for disk logs.
 */
static void destroy_log_context(struct log_c *lc)
{
	vfree(lc->sync_bits);
	vfree(lc->recovering_bits);
	kfree(lc);
}
/* Destructor for in-core logs: clean_bits was a standalone vmalloc. */
static void core_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;
	vfree(lc->clean_bits);
	destroy_log_context(lc);
}
  389. /*----------------------------------------------------------------
  390. * disk log constructor/destructor
  391. *
  392. * argv contains log_device region_size followed optionally by [no]sync
  393. *--------------------------------------------------------------*/
  394. static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
  395. unsigned int argc, char **argv)
  396. {
  397. int r;
  398. struct dm_dev *dev;
  399. if (argc < 2 || argc > 3) {
  400. DMWARN("wrong number of arguments to disk dirty region log");
  401. return -EINVAL;
  402. }
  403. r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
  404. FMODE_READ | FMODE_WRITE, &dev);
  405. if (r)
  406. return r;
  407. r = create_log_context(log, ti, argc - 1, argv + 1, dev);
  408. if (r) {
  409. dm_put_device(ti, dev);
  410. return r;
  411. }
  412. return 0;
  413. }
/*
 * Destructor for disk logs.  Freeing disk_header also frees
 * clean_bits, which points into that buffer.
 */
static void disk_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;
	dm_put_device(lc->ti, lc->log_dev);
	vfree(lc->disk_header);
	dm_io_client_destroy(lc->io_req.client);
	destroy_log_context(lc);
}
  422. static int count_bits32(uint32_t *addr, unsigned size)
  423. {
  424. int count = 0, i;
  425. for (i = 0; i < size; i++) {
  426. count += hweight32(*(addr+i));
  427. }
  428. return count;
  429. }
  430. static void fail_log_device(struct log_c *lc)
  431. {
  432. if (lc->log_dev_failed)
  433. return;
  434. lc->log_dev_failed = 1;
  435. dm_table_event(lc->ti->table);
  436. }
/*
 * Resume a disk log: read the on-disk header, adjust the clean bitset
 * for a device that has grown or shrunk since the header was written,
 * seed the sync state from the clean bits, and write the updated
 * header back.  A read failure degrades the log device but proceeds
 * with everything treated as out-of-sync.
 */
static int disk_resume(struct dm_dirty_log *log)
{
	int r;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
	/* read the disk header */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
		 * If the log device cannot be read, we must assume
		 * all regions are out-of-sync. If we simply return
		 * here, the state will be uninitialized and could
		 * lead us to return 'in-sync' status for regions
		 * that are actually 'out-of-sync'.
		 */
		lc->header.nr_regions = 0;
	}
	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);
	/* clear any old bits -- device has shrunk */
	/* (only up to the end of the last 32-bit word of the bitset) */
	for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
		log_clear_bit(lc, lc->clean_bits, i);
	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
	lc->sync_search = 0;
	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;
	/* write the new header */
	r = write_header(lc);
	if (r) {
		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}
	return r;
}
  485. static uint32_t core_get_region_size(struct dm_dirty_log *log)
  486. {
  487. struct log_c *lc = (struct log_c *) log->context;
  488. return lc->region_size;
  489. }
  490. static int core_resume(struct dm_dirty_log *log)
  491. {
  492. struct log_c *lc = (struct log_c *) log->context;
  493. lc->sync_search = 0;
  494. return 0;
  495. }
  496. static int core_is_clean(struct dm_dirty_log *log, region_t region)
  497. {
  498. struct log_c *lc = (struct log_c *) log->context;
  499. return log_test_bit(lc->clean_bits, region);
  500. }
  501. static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
  502. {
  503. struct log_c *lc = (struct log_c *) log->context;
  504. return log_test_bit(lc->sync_bits, region);
  505. }
  506. static int core_flush(struct dm_dirty_log *log)
  507. {
  508. /* no op */
  509. return 0;
  510. }
  511. static int disk_flush(struct dm_dirty_log *log)
  512. {
  513. int r;
  514. struct log_c *lc = (struct log_c *) log->context;
  515. /* only write if the log has changed */
  516. if (!lc->touched)
  517. return 0;
  518. r = write_header(lc);
  519. if (r)
  520. fail_log_device(lc);
  521. else
  522. lc->touched = 0;
  523. return r;
  524. }
  525. static void core_mark_region(struct dm_dirty_log *log, region_t region)
  526. {
  527. struct log_c *lc = (struct log_c *) log->context;
  528. log_clear_bit(lc, lc->clean_bits, region);
  529. }
  530. static void core_clear_region(struct dm_dirty_log *log, region_t region)
  531. {
  532. struct log_c *lc = (struct log_c *) log->context;
  533. log_set_bit(lc, lc->clean_bits, region);
  534. }
/*
 * Hand out the next region needing resync: the next zero sync bit at
 * or after sync_search that is not already being recovered.  Marks
 * the chosen region recovering.  Returns 1 with *region set if work
 * was found, 0 when the scan is exhausted.
 */
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;
	if (lc->sync_search >= lc->region_count)
		return 0;
	do {
		*region = ext2_find_next_zero_bit(
			(unsigned long *) lc->sync_bits,
			lc->region_count,
			lc->sync_search);
		/* Next scan resumes past this candidate. */
		lc->sync_search = *region + 1;
		if (*region >= lc->region_count)
			return 0;
	/* Skip regions already handed out for recovery. */
	} while (log_test_bit(lc->recovering_bits, *region));
	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}
  552. static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
  553. int in_sync)
  554. {
  555. struct log_c *lc = (struct log_c *) log->context;
  556. log_clear_bit(lc, lc->recovering_bits, region);
  557. if (in_sync) {
  558. log_set_bit(lc, lc->sync_bits, region);
  559. lc->sync_count++;
  560. } else if (log_test_bit(lc->sync_bits, region)) {
  561. lc->sync_count--;
  562. log_clear_bit(lc, lc->sync_bits, region);
  563. }
  564. }
  565. static region_t core_get_sync_count(struct dm_dirty_log *log)
  566. {
  567. struct log_c *lc = (struct log_c *) log->context;
  568. return lc->sync_count;
  569. }
  570. #define DMEMIT_SYNC \
  571. if (lc->sync != DEFAULTSYNC) \
  572. DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
  573. static int core_status(struct dm_dirty_log *log, status_type_t status,
  574. char *result, unsigned int maxlen)
  575. {
  576. int sz = 0;
  577. struct log_c *lc = log->context;
  578. switch(status) {
  579. case STATUSTYPE_INFO:
  580. DMEMIT("1 %s", log->type->name);
  581. break;
  582. case STATUSTYPE_TABLE:
  583. DMEMIT("%s %u %u ", log->type->name,
  584. lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
  585. DMEMIT_SYNC;
  586. }
  587. return sz;
  588. }
  589. static int disk_status(struct dm_dirty_log *log, status_type_t status,
  590. char *result, unsigned int maxlen)
  591. {
  592. int sz = 0;
  593. struct log_c *lc = log->context;
  594. switch(status) {
  595. case STATUSTYPE_INFO:
  596. DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
  597. lc->log_dev_failed ? 'D' : 'A');
  598. break;
  599. case STATUSTYPE_TABLE:
  600. DMEMIT("%s %u %s %u ", log->type->name,
  601. lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
  602. lc->region_size);
  603. DMEMIT_SYNC;
  604. }
  605. return sz;
  606. }
/* In-core log type: no backing device; flush is a no-op. */
static struct dm_dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
	.dtr = core_dtr,
	.resume = core_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = core_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = core_status,
};
/*
 * Disk-backed log type.  Shares the core bitset operations; flushing
 * writes the header to the log device, and .postsuspend also points
 * at disk_flush so the state is persisted on suspend.
 */
static struct dm_dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
	.dtr = disk_dtr,
	.postsuspend = disk_flush,
	.resume = disk_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = disk_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = disk_status,
};
  642. int __init dm_dirty_log_init(void)
  643. {
  644. int r;
  645. r = dm_dirty_log_type_register(&_core_type);
  646. if (r)
  647. DMWARN("couldn't register core log");
  648. r = dm_dirty_log_type_register(&_disk_type);
  649. if (r) {
  650. DMWARN("couldn't register disk type");
  651. dm_dirty_log_type_unregister(&_core_type);
  652. }
  653. return r;
  654. }
/* Unregister the built-in log types in reverse registration order. */
void __exit dm_dirty_log_exit(void)
{
	dm_dirty_log_type_unregister(&_disk_type);
	dm_dirty_log_type_unregister(&_core_type);
}
  660. module_init(dm_dirty_log_init);
  661. module_exit(dm_dirty_log_exit);
  662. MODULE_DESCRIPTION(DM_NAME " dirty region log");
  663. MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
  664. MODULE_LICENSE("GPL");