dm-log.c
/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the LGPL.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "dm-log.h"
#include "dm-io.h"
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);

int dm_register_dirty_log_type(struct dirty_log_type *type)
{
	spin_lock(&_lock);
	type->use_count = 0;
	list_add(&type->list, &_log_types);
	spin_unlock(&_lock);

	return 0;
}

int dm_unregister_dirty_log_type(struct dirty_log_type *type)
{
	spin_lock(&_lock);

	if (type->use_count)
		DMWARN("Attempt to unregister a log type that is still in use");
	else
		list_del(&type->list);

	spin_unlock(&_lock);

	return 0;
}

static struct dirty_log_type *get_type(const char *type_name)
{
	struct dirty_log_type *type;

	spin_lock(&_lock);
	list_for_each_entry (type, &_log_types, list)
		if (!strcmp(type_name, type->name)) {
			if (!type->use_count && !try_module_get(type->module)) {
				spin_unlock(&_lock);
				return NULL;
			}
			type->use_count++;
			spin_unlock(&_lock);
			return type;
		}

	spin_unlock(&_lock);
	return NULL;
}

static void put_type(struct dirty_log_type *type)
{
	spin_lock(&_lock);
	if (!--type->use_count)
		module_put(type->module);
	spin_unlock(&_lock);
}
struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
				      unsigned int argc, char **argv)
{
	struct dirty_log_type *type;
	struct dirty_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return NULL;

	type = get_type(type_name);
	if (!type) {
		kfree(log);
		return NULL;
	}

	log->type = type;
	if (type->ctr(log, ti, argc, argv)) {
		kfree(log);
		put_type(type);
		return NULL;
	}

	return log;
}

void dm_destroy_dirty_log(struct dirty_log *log)
{
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
}
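
/*
 * Usage sketch (illustrative only, not lifted from a particular caller):
 * a consumer such as dm-raid1 creates a log by name and then drives it
 * through the ops table.  The helper names below are the ones exported
 * from this file; the surrounding control flow is an assumption.
 *
 *	struct dirty_log *log;
 *
 *	log = dm_create_dirty_log("disk", ti, argc, argv);
 *	if (!log)
 *		return -EINVAL;
 *
 *	if (!log->type->in_sync(log, region, 0))
 *		log->type->mark_region(log, region);
 *
 *	dm_destroy_dirty_log(log);
 */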
/*-----------------------------------------------------------------
 * Persistent and core logs share a lot of their implementation.
 * FIXME: need a reload method to be called from a resume
 *---------------------------------------------------------------*/
/*
 * Magic for persistent mirrors: "MiRr"
 */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2

struct log_header {
	uint32_t magic;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;
	sector_t nr_regions;
};

struct log_c {
	struct dm_target *ti;
	int touched;
	uint32_t region_size;
	unsigned int region_count;
	region_t sync_count;

	unsigned bitset_uint32_count;
	uint32_t *clean_bits;
	uint32_t *sync_bits;
	uint32_t *recovering_bits;	/* FIXME: this seems excessive */

	int sync_search;

	/* Resync flag */
	enum sync {
		DEFAULTSYNC,	/* Synchronize if necessary */
		NOSYNC,		/* Devices known to be already in sync */
		FORCESYNC,	/* Force a sync to happen */
	} sync;

	/*
	 * Disk log fields
	 */
	struct dm_dev *log_dev;
	struct log_header header;
	struct io_region header_location;
	struct log_header *disk_header;

	struct io_region bits_location;
};
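
/*
 * On-disk layout used by the disk log (see disk_ctr() and disk_resume()
 * below): the log_header is written little-endian into sector 0 of the
 * log device, and the clean bitmap starts at sector LOG_OFFSET (2),
 * rounded up to a whole number of sectors.
 */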
/*
 * The touched member needs to be updated every time we access
 * one of the bitsets.
 */
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
	return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
}

static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned bit)
{
	ext2_set_bit(bit, (unsigned long *) bs);
	l->touched = 1;
}

static inline void log_clear_bit(struct log_c *l,
				 uint32_t *bs, unsigned bit)
{
	ext2_clear_bit(bit, (unsigned long *) bs);
	l->touched = 1;
}
/*----------------------------------------------------------------
 * Header IO
 *--------------------------------------------------------------*/
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
	disk->magic = cpu_to_le32(core->magic);
	disk->version = cpu_to_le32(core->version);
	disk->nr_regions = cpu_to_le64(core->nr_regions);
}

static void header_from_disk(struct log_header *core, struct log_header *disk)
{
	core->magic = le32_to_cpu(disk->magic);
	core->version = le32_to_cpu(disk->version);
	core->nr_regions = le64_to_cpu(disk->nr_regions);
}

static int read_header(struct log_c *log)
{
	int r;
	unsigned long ebits;

	r = dm_io_sync_vm(1, &log->header_location, READ,
			  log->disk_header, &ebits);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

#ifdef __LITTLE_ENDIAN
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}

static inline int write_header(struct log_c *log)
{
	unsigned long ebits;

	header_to_disk(&log->header, log->disk_header);
	return dm_io_sync_vm(1, &log->header_location, WRITE,
			     log->disk_header, &ebits);
}

/*----------------------------------------------------------------
 * Bits IO
 *--------------------------------------------------------------*/
static int read_bits(struct log_c *log)
{
	int r;
	unsigned long ebits;

	r = dm_io_sync_vm(1, &log->bits_location, READ,
			  log->clean_bits, &ebits);
	if (r)
		return r;

	return 0;
}

static int write_bits(struct log_c *log)
{
	unsigned long ebits;
	return dm_io_sync_vm(1, &log->bits_location, WRITE,
			     log->clean_bits, &ebits);
}
/*----------------------------------------------------------------
 * core log constructor/destructor
 *
 * argv contains region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
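/*
 * For example (hypothetical table fragment), a dm-mirror table using the
 * core log might pass "core 1 1024" for a 1024-sector region size, or
 * "core 2 1024 nosync" when a sync argument is supplied; the argument
 * counts mirror what core_status() reports for STATUSTYPE_TABLE.
 */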
#define BYTE_SHIFT 3
static int core_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	enum sync sync = DEFAULTSYNC;
	struct log_c *lc;
	uint32_t region_size;
	unsigned int region_count;
	size_t bitset_size;

	if (argc < 1 || argc > 2) {
		DMWARN("wrong number of arguments to mirror log");
		return -EINVAL;
	}

	if (argc > 1) {
		if (!strcmp(argv[1], "sync"))
			sync = FORCESYNC;
		else if (!strcmp(argv[1], "nosync"))
			sync = NOSYNC;
		else {
			DMWARN("unrecognised sync argument to mirror log: %s",
			       argv[1]);
			return -EINVAL;
		}
	}

	if (sscanf(argv[0], "%u", &region_size) != 1) {
		DMWARN("invalid region size string");
		return -EINVAL;
	}

	region_count = dm_sector_div_up(ti->len, region_size);

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc) {
		DMWARN("couldn't allocate core log");
		return -ENOMEM;
	}

	lc->ti = ti;
	lc->touched = 0;
	lc->region_size = region_size;
	lc->region_count = region_count;
	lc->sync = sync;

	/*
	 * Work out how many "unsigned long"s we need to hold the bitset.
	 */
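	/*
	 * Worked example (assuming a 64-bit build, so
	 * sizeof(unsigned long) == 8): region_count = 1000 is rounded up
	 * to 1024 bits, giving bitset_size = 128 bytes and
	 * bitset_uint32_count = 32.
	 */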
	bitset_size = dm_round_up(region_count,
				  sizeof(unsigned long) << BYTE_SHIFT);
	bitset_size >>= BYTE_SHIFT;

	lc->bitset_uint32_count = bitset_size / 4;
	lc->clean_bits = vmalloc(bitset_size);
	if (!lc->clean_bits) {
		DMWARN("couldn't allocate clean bitset");
		kfree(lc);
		return -ENOMEM;
	}
	memset(lc->clean_bits, -1, bitset_size);

	lc->sync_bits = vmalloc(bitset_size);
	if (!lc->sync_bits) {
		DMWARN("couldn't allocate sync bitset");
		vfree(lc->clean_bits);
		kfree(lc);
		return -ENOMEM;
	}
	memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
	lc->sync_count = (sync == NOSYNC) ? region_count : 0;

	lc->recovering_bits = vmalloc(bitset_size);
	if (!lc->recovering_bits) {
		DMWARN("couldn't allocate recovering bitset");
		vfree(lc->sync_bits);
		vfree(lc->clean_bits);
		kfree(lc);
		return -ENOMEM;
	}
	memset(lc->recovering_bits, 0, bitset_size);
	lc->sync_search = 0;
	log->context = lc;
	return 0;
}
static void core_dtr(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;
	vfree(lc->clean_bits);
	vfree(lc->sync_bits);
	vfree(lc->recovering_bits);
	kfree(lc);
}
/*----------------------------------------------------------------
 * disk log constructor/destructor
 *
 * argv contains log_device region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
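/*
 * For example (hypothetical table fragment), "disk 2 /dev/sdb1 1024"
 * selects a 1024-sector region size with the default sync policy, and
 * "disk 3 /dev/sdb1 1024 nosync" adds the optional sync argument; the
 * counts match what disk_status() reports for STATUSTYPE_TABLE.
 */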
static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	int r;
	size_t size;
	struct log_c *lc;
	struct dm_dev *dev;

	if (argc < 2 || argc > 3) {
		DMWARN("wrong number of arguments to disk mirror log");
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
			  FMODE_READ | FMODE_WRITE, &dev);
	if (r)
		return r;

	r = core_ctr(log, ti, argc - 1, argv + 1);
	if (r) {
		dm_put_device(ti, dev);
		return r;
	}

	lc = (struct log_c *) log->context;
	lc->log_dev = dev;

	/* setup the disk header fields */
	lc->header_location.bdev = lc->log_dev->bdev;
	lc->header_location.sector = 0;
	lc->header_location.count = 1;

	/*
	 * We can't read less than this amount, even though we'll
	 * not be using most of this space.
	 */
	lc->disk_header = vmalloc(1 << SECTOR_SHIFT);
	if (!lc->disk_header)
		goto bad;

	/* setup the disk bitset fields */
	lc->bits_location.bdev = lc->log_dev->bdev;
	lc->bits_location.sector = LOG_OFFSET;

	size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t),
			   1 << SECTOR_SHIFT);
	lc->bits_location.count = size >> SECTOR_SHIFT;
	return 0;

 bad:
	dm_put_device(ti, lc->log_dev);
	core_dtr(log);
	return -ENOMEM;
}

static void disk_dtr(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;
	dm_put_device(lc->ti, lc->log_dev);
	vfree(lc->disk_header);
	core_dtr(log);
}
static int count_bits32(uint32_t *addr, unsigned size)
{
	int count = 0, i;

	for (i = 0; i < size; i++) {
		count += hweight32(*(addr+i));
	}
	return count;
}

static int disk_resume(struct dirty_log *log)
{
	int r;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* read the disk header */
	r = read_header(lc);
	if (r)
		return r;

	/* read the bits */
	r = read_bits(lc);
	if (r)
		return r;

	/* set or clear any new bits */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);

	/* write the bits */
	r = write_bits(lc);
	if (r)
		return r;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	/* write the new header */
	return write_header(lc);
}
static uint32_t core_get_region_size(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;
	return lc->region_size;
}

static int core_is_clean(struct dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;
	return log_test_bit(lc->clean_bits, region);
}

static int core_in_sync(struct dirty_log *log, region_t region, int block)
{
	struct log_c *lc = (struct log_c *) log->context;
	return log_test_bit(lc->sync_bits, region);
}

static int core_flush(struct dirty_log *log)
{
	/* no op */
	return 0;
}

static int disk_flush(struct dirty_log *log)
{
	int r;
	struct log_c *lc = (struct log_c *) log->context;

	/* only write if the log has changed */
	if (!lc->touched)
		return 0;

	r = write_bits(lc);
	if (!r)
		lc->touched = 0;

	return r;
}

static void core_mark_region(struct dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;
	log_clear_bit(lc, lc->clean_bits, region);
}

static void core_clear_region(struct dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;
	log_set_bit(lc, lc->clean_bits, region);
}

static int core_get_resync_work(struct dirty_log *log, region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;

	if (lc->sync_search >= lc->region_count)
		return 0;

	do {
		*region = ext2_find_next_zero_bit(
					(unsigned long *) lc->sync_bits,
					lc->region_count,
					lc->sync_search);
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)
			return 0;

	} while (log_test_bit(lc->recovering_bits, *region));

	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}

static void core_complete_resync_work(struct dirty_log *log, region_t region,
				      int success)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->recovering_bits, region);
	if (success) {
		log_set_bit(lc, lc->sync_bits, region);
		lc->sync_count++;
	}
}

static region_t core_get_sync_count(struct dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	return lc->sync_count;
}
#define DMEMIT_SYNC \
	if (lc->sync != DEFAULTSYNC) \
		DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")

static int core_status(struct dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %u %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}

static int disk_status(struct dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;
	char buffer[16];
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buffer, lc->log_dev->bdev->bd_dev);
		DMEMIT("%s %u %s %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 2 : 3, buffer,
		       lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
static struct dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
	.dtr = core_dtr,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = core_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.complete_resync_work = core_complete_resync_work,
	.get_sync_count = core_get_sync_count,
	.status = core_status,
};

static struct dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
	.dtr = disk_dtr,
	.suspend = disk_flush,
	.resume = disk_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = disk_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.complete_resync_work = core_complete_resync_work,
	.get_sync_count = core_get_sync_count,
	.status = disk_status,
};
int __init dm_dirty_log_init(void)
{
	int r;

	r = dm_register_dirty_log_type(&_core_type);
	if (r)
		DMWARN("couldn't register core log");

	r = dm_register_dirty_log_type(&_disk_type);
	if (r) {
		DMWARN("couldn't register disk type");
		dm_unregister_dirty_log_type(&_core_type);
	}

	return r;
}

void dm_dirty_log_exit(void)
{
	dm_unregister_dirty_log_type(&_disk_type);
	dm_unregister_dirty_log_type(&_core_type);
}

EXPORT_SYMBOL(dm_register_dirty_log_type);
EXPORT_SYMBOL(dm_unregister_dirty_log_type);
EXPORT_SYMBOL(dm_create_dirty_log);
EXPORT_SYMBOL(dm_destroy_dirty_log);