dm-snap-persistent.c

/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"
#include "dm-snap.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store. It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
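/*
 * For example, with the default 32-sector (16KB) chunk size and the
 * 16-byte struct disk_exception defined below, each metadata area
 * indexes 16384 / 16 = 1024 exceptions, so the COW device is laid
 * out as:
 *
 *	chunk 0		header
 *	chunk 1		metadata for area 0
 *	chunks 2-1025	exception data for area 0
 *	chunk 1026	metadata for area 1
 *	...
 *
 * (area_location() below gives the general formula.)
 */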
/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version; no backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};
/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_snapshot *snap;	/* up pointer to my snapshot */
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;

	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};
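/*
 * Round a sector count up to the number of whole pages needed to hold
 * it; used when creating and resizing the dm-io client below.
 */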
static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->snap->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		vfree(ps->area);
		return r;
	}
	memset(ps->zero_area, 0, len);

	return 0;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
}
struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * chunk,
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}
/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * area_location(ps, area),
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->zero_area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};

	return dm_io(&io_req, 1, &where, NULL);
}
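/*
 * Read and validate the header in chunk 0.  A zero magic marks a freshly
 * blanked COW device and sets *new_snapshot; otherwise, if the table
 * supplied a chunk size that disagrees with the one recorded on disk,
 * the on-disk value wins and the area and io_client are re-sized.
 */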
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use default chunk size (or hardsect_size, if larger) if none supplied
	 */
	if (!ps->snap->chunk_size) {
		ps->snap->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_hardsect_size(ps->snap->cow->bdev) >> 9);
		ps->snap->chunk_mask = ps->snap->chunk_size - 1;
		ps->snap->chunk_shift = ffs(ps->snap->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->snap->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->snap->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->snap->chunk_size = chunk_size;
	ps->snap->chunk_mask = chunk_size - 1;
	ps->snap->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}
static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);

	return chunk_io(ps, 0, WRITE, 1);
}
/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}
/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}
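/*
 * Walk the metadata areas in order, registering their exceptions, until
 * one of them turns out not to be completely full; that partially full
 * area is where new exceptions will be appended.
 */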
static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}
static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->snap->chunk_size;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

static void persistent_destroy(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);
	dm_io_client_destroy(ps->io_client);
	vfree(ps->callbacks);
	free_area(ps);
	kfree(ps);
}
static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * The metadata is valid, but the snapshot has been invalidated.
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps, callback, callback_context);
		if (r)
			return r;
	}

	return 0;
}
static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->snap->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
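	/*
	 * Chunk indices congruent to 1 modulo (exceptions_per_area + 1)
	 * are metadata chunks (see area_location()), so when the division
	 * below leaves a remainder of 1 the candidate chunk is skipped.
	 */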
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area, there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}
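	/*
	 * Flush the queued callbacks: every exception committed since the
	 * last flush completes here, with ps->valid as the success flag.
	 */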
	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}
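/*
 * Allocate and initialise a pstore and wire the persistent implementation
 * into the generic exception store callbacks.
 */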
int dm_create_persistent(struct dm_exception_store *store)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->snap = store->snap;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->destroy = persistent_destroy;
	store->read_metadata = persistent_read_metadata;
	store->prepare_exception = persistent_prepare_exception;
	store->commit_exception = persistent_commit_exception;
	store->drop_snapshot = persistent_drop_snapshot;
	store->fraction_full = persistent_fraction_full;
	store->context = ps;

	return 0;
}
int dm_persistent_snapshot_init(void)
{
	return 0;
}

void dm_persistent_snapshot_exit(void)
{
}