/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store. It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with different disk versions than the kernel will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
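
/*
 * For illustration (numbers assume the 32-sector/16KB default
 * chunk size): each struct disk_exception is 16 bytes, so a
 * metadata area indexes 16384 / 16 = 1024 exceptions and the
 * COW device is laid out as:
 *
 *	chunk 0			header
 *	chunk 1			metadata area 0
 *	chunks 2-1025		exception data for area 0
 *	chunk 1026		metadata area 1
 *	chunks 1027-2050	exception data for area 1
 *	...
 */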

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};
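
/*
 * A worked example (not part of the original source): a freshly
 * written header for a valid, version-1 snapshot with the default
 * 32-sector chunk size is stored little-endian as
 *
 *	magic      = 0x70416e53	("SnAp")
 *	valid      = 1
 *	version    = 1
 *	chunk_size = 32
 *
 * with the rest of the header chunk zeroed; see write_header()
 * below.
 */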

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}
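
/*
 * For example (assuming 4KB pages, i.e. PAGE_SIZE >> 9 == 8
 * sectors per page): sectors_to_pages(32) == 4, so the default
 * 16KB chunk needs a four-page dm-io client.
 */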

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		vfree(ps->area);
		return r;
	}
	memset(ps->zero_area, 0, len);

	return 0;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->store->cow->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}
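
/*
 * Note (added for clarity): the queue_work()/flush_workqueue()
 * pair above keeps the metadata path synchronous from the
 * caller's point of view. flush_workqueue() does not return
 * until do_metadata() has run, so req.result is valid afterwards
 * and req can safely live on the caller's stack.
 */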

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}
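
/*
 * For example, with 1024 exceptions per area (the 16KB default),
 * area_location() maps area 0 to chunk 1, area 1 to chunk 1026
 * and area 2 to chunk 2051: each metadata chunk is followed by
 * exceptions_per_area data chunks, one per exception it indexes.
 */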

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, rw, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use default chunk size (or logical block size, if larger)
	 * if none supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(ps->store->cow->bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}
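
	/*
	 * Worked example (assuming chunk_size is a power of two,
	 * which the snapshot core is expected to have validated):
	 * chunk_size = 32 gives chunk_shift = ffs(32) - 1 = 5 and
	 * chunk_mask = 0x1f.
	 */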

	ps->io_client = dm_io_client_create(
				sectors_to_pages(ps->store->chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->area, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->store->chunk_size = chunk_size;
	ps->store->chunk_mask = chunk_size - 1;
	ps->store->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions. Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->chunk_size;
	*denominator = get_dev_size(store->cow->bdev);
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Metadata are valid, but snapshot is invalidated
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps, callback, callback_context);
		if (r)
			return r;
	}

	return 0;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->cow->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
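
/*
 * Worked example (1024 exceptions per area, so stride == 1025):
 * next_free runs 2, 3, ... 1025; when the increment takes it to
 * 1026 we have 1026 % 1025 == 1, i.e. a metadata chunk (it is
 * area_location(1)), so next_free is bumped again to 1027, the
 * first data chunk of the second area.
 */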

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array. This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_BARRIER))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
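
/*
 * Note (added for clarity): callbacks are deliberately batched.
 * The metadata area is only written out once the last pending
 * exception completes or the area fills, and the synchronous
 * WRITE_BARRIER means the exception records are stable on disk
 * before any of the queued completion callbacks run.
 */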

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;

	case STATUSTYPE_TABLE:
		DMEMIT(" %s P %llu", store->cow->name,
		       (unsigned long long)store->chunk_size);
	}

	return sz;
}
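
/*
 * For reference (an illustrative example, not part of this file):
 * the STATUSTYPE_TABLE output above mirrors the table line used to
 * create a persistent snapshot, where "P" selects this exception
 * store and the final argument is the chunk size in sectors, e.g.
 *
 *	echo "0 $ORIGIN_SECTORS snapshot /dev/vg/origin /dev/vg/cow P 32" \
 *		| dmsetup create snap
 */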

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}