/*
 * dm-log.c — device-mapper dirty region log
 * (line-number gutter residue from the source viewer removed)
 */
  1. /*
  2. * Copyright (C) 2003 Sistina Software
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the LGPL.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/module.h>
  10. #include <linux/vmalloc.h>
  11. #include <linux/dm-io.h>
  12. #include <linux/dm-dirty-log.h>
  13. #include <linux/device-mapper.h>
  14. #define DM_MSG_PREFIX "dirty region log"
/*
 * Registry entry for one dirty-log implementation: the type itself plus
 * a use count so the owning module cannot be unloaded while a log of
 * this type is live.
 */
struct dm_dirty_log_internal {
    struct dm_dirty_log_type *type; /* the registered implementation */
    struct list_head list;          /* link in the global _log_types list */
    long use;                       /* number of active users of this type */
};

/* Global list of registered log types; both protected by _lock. */
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
  22. static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name)
  23. {
  24. struct dm_dirty_log_internal *log_type;
  25. list_for_each_entry(log_type, &_log_types, list)
  26. if (!strcmp(name, log_type->type->name))
  27. return log_type;
  28. return NULL;
  29. }
  30. static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name)
  31. {
  32. struct dm_dirty_log_internal *log_type;
  33. spin_lock(&_lock);
  34. log_type = __find_dirty_log_type(name);
  35. if (log_type) {
  36. if (!log_type->use && !try_module_get(log_type->type->module))
  37. log_type = NULL;
  38. else
  39. log_type->use++;
  40. }
  41. spin_unlock(&_lock);
  42. return log_type;
  43. }
  44. /*
  45. * get_type
  46. * @type_name
  47. *
  48. * Attempt to retrieve the dm_dirty_log_type by name. If not already
  49. * available, attempt to load the appropriate module.
  50. *
  51. * Log modules are named "dm-log-" followed by the 'type_name'.
  52. * Modules may contain multiple types.
  53. * This function will first try the module "dm-log-<type_name>",
  54. * then truncate 'type_name' on the last '-' and try again.
  55. *
  56. * For example, if type_name was "clustered-disk", it would search
  57. * 'dm-log-clustered-disk' then 'dm-log-clustered'.
  58. *
  59. * Returns: dirty_log_type* on success, NULL on failure
  60. */
  61. static struct dm_dirty_log_type *get_type(const char *type_name)
  62. {
  63. char *p, *type_name_dup;
  64. struct dm_dirty_log_internal *log_type;
  65. if (!type_name)
  66. return NULL;
  67. log_type = _get_dirty_log_type(type_name);
  68. if (log_type)
  69. return log_type->type;
  70. type_name_dup = kstrdup(type_name, GFP_KERNEL);
  71. if (!type_name_dup) {
  72. DMWARN("No memory left to attempt log module load for \"%s\"",
  73. type_name);
  74. return NULL;
  75. }
  76. while (request_module("dm-log-%s", type_name_dup) ||
  77. !(log_type = _get_dirty_log_type(type_name))) {
  78. p = strrchr(type_name_dup, '-');
  79. if (!p)
  80. break;
  81. p[0] = '\0';
  82. }
  83. if (!log_type)
  84. DMWARN("Module for logging type \"%s\" not found.", type_name);
  85. kfree(type_name_dup);
  86. return log_type ? log_type->type : NULL;
  87. }
  88. static void put_type(struct dm_dirty_log_type *type)
  89. {
  90. struct dm_dirty_log_internal *log_type;
  91. if (!type)
  92. return;
  93. spin_lock(&_lock);
  94. log_type = __find_dirty_log_type(type->name);
  95. if (!log_type)
  96. goto out;
  97. if (!--log_type->use)
  98. module_put(type->module);
  99. BUG_ON(log_type->use < 0);
  100. out:
  101. spin_unlock(&_lock);
  102. }
  103. static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type)
  104. {
  105. struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type),
  106. GFP_KERNEL);
  107. if (log_type)
  108. log_type->type = type;
  109. return log_type;
  110. }
  111. int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
  112. {
  113. struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type);
  114. int r = 0;
  115. if (!log_type)
  116. return -ENOMEM;
  117. spin_lock(&_lock);
  118. if (!__find_dirty_log_type(type->name))
  119. list_add(&log_type->list, &_log_types);
  120. else {
  121. kfree(log_type);
  122. r = -EEXIST;
  123. }
  124. spin_unlock(&_lock);
  125. return r;
  126. }
  127. EXPORT_SYMBOL(dm_dirty_log_type_register);
  128. int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
  129. {
  130. struct dm_dirty_log_internal *log_type;
  131. spin_lock(&_lock);
  132. log_type = __find_dirty_log_type(type->name);
  133. if (!log_type) {
  134. spin_unlock(&_lock);
  135. return -EINVAL;
  136. }
  137. if (log_type->use) {
  138. spin_unlock(&_lock);
  139. return -ETXTBSY;
  140. }
  141. list_del(&log_type->list);
  142. spin_unlock(&_lock);
  143. kfree(log_type);
  144. return 0;
  145. }
  146. EXPORT_SYMBOL(dm_dirty_log_type_unregister);
  147. struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
  148. struct dm_target *ti,
  149. unsigned int argc, char **argv)
  150. {
  151. struct dm_dirty_log_type *type;
  152. struct dm_dirty_log *log;
  153. log = kmalloc(sizeof(*log), GFP_KERNEL);
  154. if (!log)
  155. return NULL;
  156. type = get_type(type_name);
  157. if (!type) {
  158. kfree(log);
  159. return NULL;
  160. }
  161. log->type = type;
  162. if (type->ctr(log, ti, argc, argv)) {
  163. kfree(log);
  164. put_type(type);
  165. return NULL;
  166. }
  167. return log;
  168. }
  169. EXPORT_SYMBOL(dm_dirty_log_create);
/*
 * Tear down a log made by dm_dirty_log_create(): run the type's
 * destructor, drop the type/module reference, free the handle.
 */
void dm_dirty_log_destroy(struct dm_dirty_log *log)
{
    log->type->dtr(log);
    put_type(log->type);
    kfree(log);
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
  177. /*-----------------------------------------------------------------
  178. * Persistent and core logs share a lot of their implementation.
  179. * FIXME: need a reload method to be called from a resume
  180. *---------------------------------------------------------------*/
/*
 * Magic for persistent mirrors: "MiRr"
 */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2
/* Sectors reserved at the start of the log buffer for the header. */
#define LOG_OFFSET 2

/* Log header; written to disk in little-endian form (see header_to_disk). */
struct log_header {
    uint32_t magic;      /* MIRROR_MAGIC identifies a valid log */

    /*
     * Simple, incrementing version. no backward
     * compatibility.
     */
    uint32_t version;
    sector_t nr_regions; /* region count when the header was last written */
};

/* Context shared by the "core" (in-memory) and "disk" log types. */
struct log_c {
    struct dm_target *ti;
    int touched;              /* set when a bitset changes; cleared by disk_flush */
    uint32_t region_size;     /* region size in sectors */
    unsigned int region_count;
    region_t sync_count;      /* number of regions currently in sync */

    unsigned bitset_uint32_count; /* uint32_t words in each bitset below */
    uint32_t *clean_bits;     /* bit set => region has no pending writes */
    uint32_t *sync_bits;      /* bit set => region is in sync */
    uint32_t *recovering_bits; /* FIXME: this seems excessive */

    int sync_search;          /* resume point for core_get_resync_work() */

    /* Resync flag */
    enum sync {
        DEFAULTSYNC,    /* Synchronize if necessary */
        NOSYNC,         /* Devices known to be already in sync */
        FORCESYNC,      /* Force a sync to happen */
    } sync;

    struct dm_io_request io_req;

    /*
     * Disk log fields (unused by the core log)
     */
    int log_dev_failed;       /* set once I/O to log_dev has failed */
    struct dm_dev *log_dev;
    struct log_header header; /* in-core copy, CPU byte order */
    struct dm_io_region header_location;
    struct log_header *disk_header; /* I/O buffer: header + clean bitset */
};
/*
 * The touched member needs to be updated every time we access
 * one of the bitsets.
 */

/* Test 'bit' in bitset 'bs'; returns 0 or 1.  Read-only, so 'touched'
 * is left alone.  The ext2_* helpers give a fixed (little-endian) bit
 * order so the on-disk bitset layout is CPU-independent. */
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
    return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
}

/* Set 'bit' and mark the log dirty so the next flush writes it out. */
static inline void log_set_bit(struct log_c *l,
                               uint32_t *bs, unsigned bit)
{
    ext2_set_bit(bit, (unsigned long *) bs);
    l->touched = 1;
}

/* Clear 'bit' and mark the log dirty so the next flush writes it out. */
static inline void log_clear_bit(struct log_c *l,
                                 uint32_t *bs, unsigned bit)
{
    ext2_clear_bit(bit, (unsigned long *) bs);
    l->touched = 1;
}
/*----------------------------------------------------------------
 * Header IO
 *--------------------------------------------------------------*/

/* Convert the in-core header to its little-endian on-disk form. */
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
    disk->magic = cpu_to_le32(core->magic);
    disk->version = cpu_to_le32(core->version);
    disk->nr_regions = cpu_to_le64(core->nr_regions);
}

/* Convert an on-disk (little-endian) header to CPU byte order. */
static void header_from_disk(struct log_header *core, struct log_header *disk)
{
    core->magic = le32_to_cpu(disk->magic);
    core->version = le32_to_cpu(disk->version);
    core->nr_regions = le64_to_cpu(disk->nr_regions);
}
/* Synchronous dm_io of the whole header area (header + clean bitset);
 * 'rw' is READ or WRITE.  Returns the dm_io error code. */
static int rw_header(struct log_c *lc, int rw)
{
    lc->io_req.bi_rw = rw;
    return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
/*
 * Read and validate the on-disk header into log->header.
 *
 * If the magic is wrong, or a forced [no]sync was requested, the in-core
 * header is re-initialised with nr_regions = 0 so the log is rebuilt.
 * Returns 0, -EINVAL on a version mismatch, or the dm_io error.
 */
static int read_header(struct log_c *log)
{
    int r;

    r = rw_header(log, READ);
    if (r)
        return r;

    header_from_disk(&log->header, log->disk_header);

    /* New log required? */
    if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
        log->header.magic = MIRROR_MAGIC;
        log->header.version = MIRROR_DISK_VERSION;
        log->header.nr_regions = 0;
    }

#ifdef __LITTLE_ENDIAN
    /* Adopt a version-1 log in place: on little-endian hosts its bitset
     * layout matches version 2 (presumably why this is LE-only). */
    if (log->header.version == 1)
        log->header.version = 2;
#endif

    if (log->header.version != MIRROR_DISK_VERSION) {
        DMWARN("incompatible disk log version");
        return -EINVAL;
    }

    return 0;
}
  289. static int _check_region_size(struct dm_target *ti, uint32_t region_size)
  290. {
  291. if (region_size < 2 || region_size > ti->len)
  292. return 0;
  293. if (!is_power_of_2(region_size))
  294. return 0;
  295. return 1;
  296. }
  297. /*----------------------------------------------------------------
  298. * core log constructor/destructor
  299. *
  300. * argv contains region_size followed optionally by [no]sync
  301. *--------------------------------------------------------------*/
  302. #define BYTE_SHIFT 3
  303. static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
  304. unsigned int argc, char **argv,
  305. struct dm_dev *dev)
  306. {
  307. enum sync sync = DEFAULTSYNC;
  308. struct log_c *lc;
  309. uint32_t region_size;
  310. unsigned int region_count;
  311. size_t bitset_size, buf_size;
  312. int r;
  313. if (argc < 1 || argc > 2) {
  314. DMWARN("wrong number of arguments to dirty region log");
  315. return -EINVAL;
  316. }
  317. if (argc > 1) {
  318. if (!strcmp(argv[1], "sync"))
  319. sync = FORCESYNC;
  320. else if (!strcmp(argv[1], "nosync"))
  321. sync = NOSYNC;
  322. else {
  323. DMWARN("unrecognised sync argument to "
  324. "dirty region log: %s", argv[1]);
  325. return -EINVAL;
  326. }
  327. }
  328. if (sscanf(argv[0], "%u", &region_size) != 1 ||
  329. !_check_region_size(ti, region_size)) {
  330. DMWARN("invalid region size %s", argv[0]);
  331. return -EINVAL;
  332. }
  333. region_count = dm_sector_div_up(ti->len, region_size);
  334. lc = kmalloc(sizeof(*lc), GFP_KERNEL);
  335. if (!lc) {
  336. DMWARN("couldn't allocate core log");
  337. return -ENOMEM;
  338. }
  339. lc->ti = ti;
  340. lc->touched = 0;
  341. lc->region_size = region_size;
  342. lc->region_count = region_count;
  343. lc->sync = sync;
  344. /*
  345. * Work out how many "unsigned long"s we need to hold the bitset.
  346. */
  347. bitset_size = dm_round_up(region_count,
  348. sizeof(*lc->clean_bits) << BYTE_SHIFT);
  349. bitset_size >>= BYTE_SHIFT;
  350. lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
  351. /*
  352. * Disk log?
  353. */
  354. if (!dev) {
  355. lc->clean_bits = vmalloc(bitset_size);
  356. if (!lc->clean_bits) {
  357. DMWARN("couldn't allocate clean bitset");
  358. kfree(lc);
  359. return -ENOMEM;
  360. }
  361. lc->disk_header = NULL;
  362. } else {
  363. lc->log_dev = dev;
  364. lc->log_dev_failed = 0;
  365. lc->header_location.bdev = lc->log_dev->bdev;
  366. lc->header_location.sector = 0;
  367. /*
  368. * Buffer holds both header and bitset.
  369. */
  370. buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
  371. bitset_size, ti->limits.hardsect_size);
  372. if (buf_size > dev->bdev->bd_inode->i_size) {
  373. DMWARN("log device %s too small: need %llu bytes",
  374. dev->name, (unsigned long long)buf_size);
  375. kfree(lc);
  376. return -EINVAL;
  377. }
  378. lc->header_location.count = buf_size >> SECTOR_SHIFT;
  379. lc->io_req.mem.type = DM_IO_VMA;
  380. lc->io_req.notify.fn = NULL;
  381. lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
  382. PAGE_SIZE));
  383. if (IS_ERR(lc->io_req.client)) {
  384. r = PTR_ERR(lc->io_req.client);
  385. DMWARN("couldn't allocate disk io client");
  386. kfree(lc);
  387. return -ENOMEM;
  388. }
  389. lc->disk_header = vmalloc(buf_size);
  390. if (!lc->disk_header) {
  391. DMWARN("couldn't allocate disk log buffer");
  392. dm_io_client_destroy(lc->io_req.client);
  393. kfree(lc);
  394. return -ENOMEM;
  395. }
  396. lc->io_req.mem.ptr.vma = lc->disk_header;
  397. lc->clean_bits = (void *)lc->disk_header +
  398. (LOG_OFFSET << SECTOR_SHIFT);
  399. }
  400. memset(lc->clean_bits, -1, bitset_size);
  401. lc->sync_bits = vmalloc(bitset_size);
  402. if (!lc->sync_bits) {
  403. DMWARN("couldn't allocate sync bitset");
  404. if (!dev)
  405. vfree(lc->clean_bits);
  406. else
  407. dm_io_client_destroy(lc->io_req.client);
  408. vfree(lc->disk_header);
  409. kfree(lc);
  410. return -ENOMEM;
  411. }
  412. memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
  413. lc->sync_count = (sync == NOSYNC) ? region_count : 0;
  414. lc->recovering_bits = vmalloc(bitset_size);
  415. if (!lc->recovering_bits) {
  416. DMWARN("couldn't allocate sync bitset");
  417. vfree(lc->sync_bits);
  418. if (!dev)
  419. vfree(lc->clean_bits);
  420. else
  421. dm_io_client_destroy(lc->io_req.client);
  422. vfree(lc->disk_header);
  423. kfree(lc);
  424. return -ENOMEM;
  425. }
  426. memset(lc->recovering_bits, 0, bitset_size);
  427. lc->sync_search = 0;
  428. log->context = lc;
  429. return 0;
  430. }
/* Constructor for the in-core-only log: no backing device. */
static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
{
    return create_log_context(log, ti, argc, argv, NULL);
}
/* Free the state common to core and disk logs.  clean_bits is NOT freed
 * here because its ownership differs (separate vmalloc for the core log,
 * part of disk_header for the disk log). */
static void destroy_log_context(struct log_c *lc)
{
    vfree(lc->sync_bits);
    vfree(lc->recovering_bits);
    kfree(lc);
}
/* Destructor for the core log: clean_bits was vmalloc'ed separately. */
static void core_dtr(struct dm_dirty_log *log)
{
    struct log_c *lc = (struct log_c *) log->context;

    vfree(lc->clean_bits);
    destroy_log_context(lc);
}
  448. /*----------------------------------------------------------------
  449. * disk log constructor/destructor
  450. *
  451. * argv contains log_device region_size followed optionally by [no]sync
  452. *--------------------------------------------------------------*/
  453. static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
  454. unsigned int argc, char **argv)
  455. {
  456. int r;
  457. struct dm_dev *dev;
  458. if (argc < 2 || argc > 3) {
  459. DMWARN("wrong number of arguments to disk dirty region log");
  460. return -EINVAL;
  461. }
  462. r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
  463. FMODE_READ | FMODE_WRITE, &dev);
  464. if (r)
  465. return r;
  466. r = create_log_context(log, ti, argc - 1, argv + 1, dev);
  467. if (r) {
  468. dm_put_device(ti, dev);
  469. return r;
  470. }
  471. return 0;
  472. }
/* Destructor for the disk log: release the device, the I/O buffer and
 * the io client.  clean_bits points into disk_header, so it is freed
 * with it rather than separately. */
static void disk_dtr(struct dm_dirty_log *log)
{
    struct log_c *lc = (struct log_c *) log->context;

    dm_put_device(lc->ti, lc->log_dev);
    vfree(lc->disk_header);
    dm_io_client_destroy(lc->io_req.client);
    destroy_log_context(lc);
}
  481. static int count_bits32(uint32_t *addr, unsigned size)
  482. {
  483. int count = 0, i;
  484. for (i = 0; i < size; i++) {
  485. count += hweight32(*(addr+i));
  486. }
  487. return count;
  488. }
/* Mark the log device failed (only once) and raise a table event so
 * userspace can notice and reconfigure the mirror. */
static void fail_log_device(struct log_c *lc)
{
    if (lc->log_dev_failed)
        return;

    lc->log_dev_failed = 1;
    dm_table_event(lc->ti->table);
}
/*
 * Resume the disk log: re-read the header, adjust the bitsets for any
 * change in region count, derive the sync state from the clean bits,
 * and write the updated header back out.
 * Returns 0 or a dm_io error from the final header write.
 */
static int disk_resume(struct dm_dirty_log *log)
{
    int r;
    unsigned i;
    struct log_c *lc = (struct log_c *) log->context;
    size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

    /* read the disk header */
    r = read_header(lc);
    if (r) {
        DMWARN("%s: Failed to read header on dirty region log device",
               lc->log_dev->name);
        fail_log_device(lc);
        /*
         * If the log device cannot be read, we must assume
         * all regions are out-of-sync. If we simply return
         * here, the state will be uninitialized and could
         * lead us to return 'in-sync' status for regions
         * that are actually 'out-of-sync'.
         */
        lc->header.nr_regions = 0;
    }

    /* set or clear any new bits -- device has grown */
    if (lc->sync == NOSYNC)
        for (i = lc->header.nr_regions; i < lc->region_count; i++)
            /* FIXME: amazingly inefficient */
            log_set_bit(lc, lc->clean_bits, i);
    else
        for (i = lc->header.nr_regions; i < lc->region_count; i++)
            /* FIXME: amazingly inefficient */
            log_clear_bit(lc, lc->clean_bits, i);

    /* clear any old bits -- device has shrunk */
    /* (only the tail of the last 32-bit word needs clearing) */
    for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
        log_clear_bit(lc, lc->clean_bits, i);

    /* copy clean across to sync */
    memcpy(lc->sync_bits, lc->clean_bits, size);
    lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
    lc->sync_search = 0;

    /* set the correct number of regions in the header */
    lc->header.nr_regions = lc->region_count;

    header_to_disk(&lc->header, lc->disk_header);

    /* write the new header */
    r = rw_header(lc, WRITE);
    if (r) {
        DMWARN("%s: Failed to write header on dirty region log device",
               lc->log_dev->name);
        fail_log_device(lc);
    }

    return r;
}
  545. static uint32_t core_get_region_size(struct dm_dirty_log *log)
  546. {
  547. struct log_c *lc = (struct log_c *) log->context;
  548. return lc->region_size;
  549. }
/* On resume, restart the resync search from region 0. */
static int core_resume(struct dm_dirty_log *log)
{
    struct log_c *lc = (struct log_c *) log->context;

    lc->sync_search = 0;
    return 0;
}
/* Non-zero if 'region' has no pending writes (its clean bit is set). */
static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
    struct log_c *lc = (struct log_c *) log->context;
    return log_test_bit(lc->clean_bits, region);
}

/* Non-zero if 'region' is in sync.  'block' ("may we block?") is
 * ignored — the answer is available without any I/O. */
static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
    struct log_c *lc = (struct log_c *) log->context;
    return log_test_bit(lc->sync_bits, region);
}
/* The core log has no backing store, so flushing is a no-op. */
static int core_flush(struct dm_dirty_log *log)
{
    /* no op */
    return 0;
}
  571. static int disk_flush(struct dm_dirty_log *log)
  572. {
  573. int r;
  574. struct log_c *lc = (struct log_c *) log->context;
  575. /* only write if the log has changed */
  576. if (!lc->touched)
  577. return 0;
  578. r = rw_header(lc, WRITE);
  579. if (r)
  580. fail_log_device(lc);
  581. else
  582. lc->touched = 0;
  583. return r;
  584. }
/* Mark 'region' dirty (writes in flight): clear its clean bit. */
static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
    struct log_c *lc = (struct log_c *) log->context;
    log_clear_bit(lc, lc->clean_bits, region);
}

/* Mark 'region' clean again: its writes have completed. */
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
    struct log_c *lc = (struct log_c *) log->context;
    log_set_bit(lc, lc->clean_bits, region);
}
/*
 * Hand out the next region that needs resync.  Returns 1 with *region
 * set and its recovering bit raised, or 0 when no work remains.
 * sync_search remembers where to continue on the next call.
 */
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
    struct log_c *lc = (struct log_c *) log->context;

    if (lc->sync_search >= lc->region_count)
        return 0;

    /* Find the next out-of-sync region, skipping those already being
     * recovered. */
    do {
        *region = ext2_find_next_zero_bit(
                      (unsigned long *) lc->sync_bits,
                      lc->region_count,
                      lc->sync_search);
        lc->sync_search = *region + 1;

        if (*region >= lc->region_count)
            return 0;

    } while (log_test_bit(lc->recovering_bits, *region));

    log_set_bit(lc, lc->recovering_bits, *region);
    return 1;
}
/*
 * Recovery of 'region' finished: drop its recovering bit and bring the
 * sync bit and sync_count in line with the outcome.
 */
static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
                                 int in_sync)
{
    struct log_c *lc = (struct log_c *) log->context;

    log_clear_bit(lc, lc->recovering_bits, region);
    if (in_sync) {
        log_set_bit(lc, lc->sync_bits, region);
        lc->sync_count++;
    } else if (log_test_bit(lc->sync_bits, region)) {
        /* Only decrement when the bit was actually set. */
        lc->sync_count--;
        log_clear_bit(lc, lc->sync_bits, region);
    }
}
  625. static region_t core_get_sync_count(struct dm_dirty_log *log)
  626. {
  627. struct log_c *lc = (struct log_c *) log->context;
  628. return lc->sync_count;
  629. }
  630. #define DMEMIT_SYNC \
  631. if (lc->sync != DEFAULTSYNC) \
  632. DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
/*
 * Status for the core log.
 *   INFO:  "1 <name>"
 *   TABLE: "<name> <argc> <region_size> [[no]sync]"
 * Returns the number of bytes written into 'result'.
 */
static int core_status(struct dm_dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
{
    int sz = 0;     /* advanced by each DMEMIT */
    struct log_c *lc = log->context;

    switch(status) {
    case STATUSTYPE_INFO:
        DMEMIT("1 %s", log->type->name);
        break;

    case STATUSTYPE_TABLE:
        DMEMIT("%s %u %u ", log->type->name,
               lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
        DMEMIT_SYNC;
    }

    return sz;
}
/*
 * Status for the disk log.
 *   INFO:  "3 <name> <dev> <A|D>"  (A = alive, D = log device failed)
 *   TABLE: "<name> <argc> <dev> <region_size> [[no]sync]"
 * Returns the number of bytes written into 'result'.
 */
static int disk_status(struct dm_dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
{
    int sz = 0;     /* advanced by each DMEMIT */
    struct log_c *lc = log->context;

    switch(status) {
    case STATUSTYPE_INFO:
        DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
               lc->log_dev_failed ? 'D' : 'A');
        break;

    case STATUSTYPE_TABLE:
        DMEMIT("%s %u %s %u ", log->type->name,
               lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
               lc->region_size);
        DMEMIT_SYNC;
    }

    return sz;
}
/* The purely in-memory dirty log type. */
static struct dm_dirty_log_type _core_type = {
    .name = "core",
    .module = THIS_MODULE,
    .ctr = core_ctr,
    .dtr = core_dtr,
    .resume = core_resume,
    .get_region_size = core_get_region_size,
    .is_clean = core_is_clean,
    .in_sync = core_in_sync,
    .flush = core_flush,
    .mark_region = core_mark_region,
    .clear_region = core_clear_region,
    .get_resync_work = core_get_resync_work,
    .set_region_sync = core_set_region_sync,
    .get_sync_count = core_get_sync_count,
    .status = core_status,
};
/* The persistent (disk-backed) dirty log type.  It reuses the core
 * bitset operations and flushes the header both on .flush and on
 * post-suspend. */
static struct dm_dirty_log_type _disk_type = {
    .name = "disk",
    .module = THIS_MODULE,
    .ctr = disk_ctr,
    .dtr = disk_dtr,
    .postsuspend = disk_flush,  /* write out state before suspend */
    .resume = disk_resume,
    .get_region_size = core_get_region_size,
    .is_clean = core_is_clean,
    .in_sync = core_in_sync,
    .flush = disk_flush,
    .mark_region = core_mark_region,
    .clear_region = core_clear_region,
    .get_resync_work = core_get_resync_work,
    .set_region_sync = core_set_region_sync,
    .get_sync_count = core_get_sync_count,
    .status = disk_status,
};
  702. static int __init dm_dirty_log_init(void)
  703. {
  704. int r;
  705. r = dm_dirty_log_type_register(&_core_type);
  706. if (r)
  707. DMWARN("couldn't register core log");
  708. r = dm_dirty_log_type_register(&_disk_type);
  709. if (r) {
  710. DMWARN("couldn't register disk type");
  711. dm_dirty_log_type_unregister(&_core_type);
  712. }
  713. return r;
  714. }
/* Module teardown: unregister in reverse order of registration. */
static void __exit dm_dirty_log_exit(void)
{
    dm_dirty_log_type_unregister(&_disk_type);
    dm_dirty_log_type_unregister(&_core_type);
}
  720. module_init(dm_dirty_log_init);
  721. module_exit(dm_dirty_log_exit);
  722. MODULE_DESCRIPTION(DM_NAME " dirty region log");
  723. MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
  724. MODULE_LICENSE("GPL");