dm-exception-store.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-snap.h"
#include "dm-io.h"
#include "kcopyd.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "snapshots"
/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
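
/*
 * Illustrative layout (editorial sketch; the figures are assumed, not
 * taken from this file): with a 16-sector (8 KiB) chunk and the 16-byte
 * struct disk_exception below, one metadata area holds 8192 / 16 = 512
 * exceptions, so the COW device looks like:
 *
 *   chunk 0            header
 *   chunk 1            metadata area 0 (exceptions 0..511)
 *   chunks 2..513      data chunks referenced by area 0
 *   chunk 514          metadata area 1 (exceptions 512..1023)
 *   chunks 515..1026   data chunks referenced by area 1
 *   ...
 */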

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
        uint32_t magic;

        /*
         * Is this snapshot valid?  There is no way of recovering
         * an invalid snapshot.
         */
        uint32_t valid;

        /*
         * Simple, incrementing version; no backward
         * compatibility.
         */
        uint32_t version;

        /* In sectors */
        uint32_t chunk_size;
};
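
/*
 * Layout note (editorial): the header occupies the first 16 bytes of
 * chunk 0 (magic at byte 0, valid at 4, version at 8, chunk_size at 12,
 * all little-endian); the rest of the chunk is zero-filled by
 * write_header().
 */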

struct disk_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

struct commit_callback {
        void (*callback)(void *, int success);
        void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
        struct dm_snapshot *snap;       /* up pointer to my snapshot */
        int version;
        int valid;
        uint32_t exceptions_per_area;

        /*
         * Now that we have an asynchronous kcopyd there is no
         * need for large chunk sizes, so it won't hurt to have a
         * whole chunk's worth of metadata in memory at once.
         */
        void *area;

        /*
         * Used to keep track of which metadata area the data in
         * 'area' refers to.
         */
        uint32_t current_area;

        /*
         * The next free chunk for an exception.
         */
        uint32_t next_free;

        /*
         * The index of the next free exception in the current
         * metadata area.
         */
        uint32_t current_committed;

        atomic_t pending_count;
        uint32_t callback_count;
        struct commit_callback *callbacks;
};

static inline unsigned int sectors_to_pages(unsigned int sectors)
{
        return sectors / (PAGE_SIZE >> 9);
}
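
/*
 * Example (assuming 4 KiB pages): PAGE_SIZE >> 9 == 8 sectors per page,
 * so a 16-sector chunk needs 2 pages' worth of dm-io resources in the
 * dm_io_get()/dm_io_put() calls below.
 */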

static int alloc_area(struct pstore *ps)
{
        int r = -ENOMEM;
        size_t len;

        len = ps->snap->chunk_size << SECTOR_SHIFT;

        /*
         * Allocate the chunk_size block of memory that will hold
         * a single metadata area.
         */
        ps->area = vmalloc(len);
        if (!ps->area)
                return r;

        return 0;
}

static void free_area(struct pstore *ps)
{
        vfree(ps->area);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
{
        struct io_region where;
        unsigned long bits;

        where.bdev = ps->snap->cow->bdev;
        where.sector = ps->snap->chunk_size * chunk;
        where.count = ps->snap->chunk_size;

        return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
}

/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
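/*
 * Illustrative mapping (using the assumed exceptions_per_area of 512
 * from the layout sketch above, so a stride of 513): chunk = 1 + 513 *
 * area, i.e. area 0 lives in chunk 1, area 1 in chunk 514, area 2 in
 * chunk 1027, and so on.
 */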
static int area_io(struct pstore *ps, uint32_t area, int rw)
{
        int r;
        uint32_t chunk;

        /* convert a metadata area index to a chunk index */
        chunk = 1 + ((ps->exceptions_per_area + 1) * area);

        r = chunk_io(ps, chunk, rw);
        if (r)
                return r;

        ps->current_area = area;
        return 0;
}

static int zero_area(struct pstore *ps, uint32_t area)
{
        memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
        return area_io(ps, area, WRITE);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
        int r;
        struct disk_header *dh;
        chunk_t chunk_size;

        r = chunk_io(ps, 0, READ);
        if (r)
                return r;

        dh = (struct disk_header *) ps->area;

        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;

        } else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) {
                *new_snapshot = 0;
                ps->valid = le32_to_cpu(dh->valid);
                ps->version = le32_to_cpu(dh->version);
                chunk_size = le32_to_cpu(dh->chunk_size);

                if (ps->snap->chunk_size != chunk_size) {
                        DMWARN("chunk size %llu in device metadata overrides "
                               "table chunk size of %llu.",
                               (unsigned long long)chunk_size,
                               (unsigned long long)ps->snap->chunk_size);

                        /* We had a bogus chunk_size. Fix stuff up. */
                        dm_io_put(sectors_to_pages(ps->snap->chunk_size));
                        free_area(ps);

                        ps->snap->chunk_size = chunk_size;
                        ps->snap->chunk_mask = chunk_size - 1;
                        ps->snap->chunk_shift = ffs(chunk_size) - 1;

                        r = alloc_area(ps);
                        if (r)
                                return r;

                        r = dm_io_get(sectors_to_pages(chunk_size));
                        if (r)
                                return r;
                }
        } else {
                DMWARN("Invalid/corrupt snapshot");
                r = -ENXIO;
        }

        return r;
}

static int write_header(struct pstore *ps)
{
        struct disk_header *dh;

        memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);

        dh = (struct disk_header *) ps->area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);

        return chunk_io(ps, 0, WRITE);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
        if (index >= ps->exceptions_per_area)
                return NULL;

        return ((struct disk_exception *) ps->area) + index;
}

static int read_exception(struct pstore *ps,
                          uint32_t index, struct disk_exception *result)
{
        struct disk_exception *e;

        e = get_exception(ps, index);
        if (!e)
                return -EINVAL;

        /* copy it */
        result->old_chunk = le64_to_cpu(e->old_chunk);
        result->new_chunk = le64_to_cpu(e->new_chunk);

        return 0;
}

static int write_exception(struct pstore *ps,
                           uint32_t index, struct disk_exception *de)
{
        struct disk_exception *e;

        e = get_exception(ps, index);
        if (!e)
                return -EINVAL;

        /* copy it */
        e->old_chunk = cpu_to_le64(de->old_chunk);
        e->new_chunk = cpu_to_le64(de->new_chunk);

        return 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, int *full)
{
        int r;
        unsigned int i;
        struct disk_exception de;

        /* presume the area is full */
        *full = 1;

        for (i = 0; i < ps->exceptions_per_area; i++) {
                r = read_exception(ps, i, &de);
                if (r)
                        return r;

                /*
                 * If the new_chunk is pointing at the start of
                 * the COW device, where the first metadata area
                 * is, we know that we've hit the end of the
                 * exceptions.  Therefore the area is not full.
                 */
                if (de.new_chunk == 0LL) {
                        ps->current_committed = i;
                        *full = 0;
                        break;
                }

                /*
                 * Keep track of the start of the free chunks.
                 */
                if (ps->next_free <= de.new_chunk)
                        ps->next_free = de.new_chunk + 1;

                /*
                 * Otherwise we add the exception to the snapshot.
                 */
                r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
                if (r)
                        return r;
        }

        return 0;
}

static int read_exceptions(struct pstore *ps)
{
        uint32_t area;
        int r, full = 1;

        /*
         * Keep reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (area = 0; full; area++) {
                r = area_io(ps, area, READ);
                if (r)
                        return r;

                r = insert_exceptions(ps, &full);
                if (r)
                        return r;
        }

        return 0;
}

static inline struct pstore *get_info(struct exception_store *store)
{
        return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct exception_store *store,
                                     sector_t *numerator, sector_t *denominator)
{
        *numerator = get_info(store)->next_free * store->snap->chunk_size;
        *denominator = get_dev_size(store->snap->cow->bdev);
}

static void persistent_destroy(struct exception_store *store)
{
        struct pstore *ps = get_info(store);

        dm_io_put(sectors_to_pages(ps->snap->chunk_size));
        vfree(ps->callbacks);
        free_area(ps);
        kfree(ps);
}

static int persistent_read_metadata(struct exception_store *store)
{
        int r, new_snapshot;
        struct pstore *ps = get_info(store);

        /*
         * Read the snapshot header.
         */
        r = read_header(ps, &new_snapshot);
        if (r)
                return r;

        /*
         * Now we know the correct chunk_size, complete the initialisation.
         */
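        /*
         * For example, with a 16-sector (8 KiB) chunk and the 16-byte
         * struct disk_exception, the division below gives 512 exceptions
         * per area (illustrative figures, not fixed by the format).
         */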
        ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                                   sizeof(*ps->callbacks));
        if (!ps->callbacks)
                return -ENOMEM;

        /*
         * Do we need to setup a new snapshot ?
         */
        if (new_snapshot) {
                r = write_header(ps);
                if (r) {
                        DMWARN("write_header failed");
                        return r;
                }

                r = zero_area(ps, 0);
                if (r) {
                        DMWARN("zero_area(0) failed");
                        return r;
                }
        } else {
                /*
                 * Sanity checks.
                 */
                if (!ps->valid) {
                        DMWARN("snapshot is marked invalid");
                        return -EINVAL;
                }

                if (ps->version != SNAPSHOT_DISK_VERSION) {
                        DMWARN("unable to handle snapshot disk version %d",
                               ps->version);
                        return -EINVAL;
                }

                /*
                 * Read the metadata.
                 */
                r = read_exceptions(ps);
                if (r)
                        return r;
        }

        return 0;
}

static int persistent_prepare(struct exception_store *store,
                              struct exception *e)
{
        struct pstore *ps = get_info(store);
        uint32_t stride;
        sector_t size = get_dev_size(store->snap->cow->bdev);

        /* Is there enough room ? */
        if (size < ((ps->next_free + 1) * store->snap->chunk_size))
                return -ENOSPC;

        e->new_chunk = ps->next_free;

        /*
         * Move onto the next free pending, making sure to take
         * into account the location of the metadata chunks.
         */
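        /*
         * Worked example (using the assumed exceptions_per_area of 512,
         * so stride == 513): metadata chunks sit wherever chunk % 513 == 1
         * (chunks 1, 514, 1027, ...), so when next_free increments onto
         * one of those it is bumped past it to the following data chunk.
         */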
        stride = (ps->exceptions_per_area + 1);
        if ((++ps->next_free % stride) == 1)
                ps->next_free++;

        atomic_inc(&ps->pending_count);
        return 0;
}

static void persistent_commit(struct exception_store *store,
                              struct exception *e,
                              void (*callback) (void *, int success),
                              void *callback_context)
{
        int r;
        unsigned int i;
        struct pstore *ps = get_info(store);
        struct disk_exception de;
        struct commit_callback *cb;

        de.old_chunk = e->old_chunk;
        de.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &de);

        /*
         * Add the callback to the back of the array.  This code
         * is the only place where the callback array is
         * manipulated, and we know that it will never be called
         * multiple times concurrently.
         */
        cb = ps->callbacks + ps->callback_count++;
        cb->callback = callback;
        cb->context = callback_context;

        /*
         * If there are no more exceptions in flight, or we have
         * filled this metadata area, we commit the exceptions to
         * disk.
         */
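        /*
         * For instance, when pending_count drops to zero here, the whole
         * in-memory area (including every exception queued above) goes out
         * in a single chunk_io() write and all queued callbacks are then
         * run; if that write fails they are completed with success == 0.
         */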
        if (atomic_dec_and_test(&ps->pending_count) ||
            (ps->current_committed == ps->exceptions_per_area)) {
                r = area_io(ps, ps->current_area, WRITE);
                if (r)
                        ps->valid = 0;

                for (i = 0; i < ps->callback_count; i++) {
                        cb = ps->callbacks + i;
                        cb->callback(cb->context, r == 0 ? 1 : 0);
                }

                ps->callback_count = 0;
        }

        /*
         * Have we completely filled the current area ?
         */
        if (ps->current_committed == ps->exceptions_per_area) {
                ps->current_committed = 0;
                r = zero_area(ps, ps->current_area + 1);
                if (r)
                        ps->valid = 0;
        }
}

static void persistent_drop(struct exception_store *store)
{
        struct pstore *ps = get_info(store);

        ps->valid = 0;
        if (write_header(ps))
                DMWARN("write header failed");
}

int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
{
        int r;
        struct pstore *ps;

        r = dm_io_get(sectors_to_pages(chunk_size));
        if (r)
                return r;

        /* allocate the pstore */
        ps = kmalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps) {
                r = -ENOMEM;
                goto bad;
        }

        ps->snap = store->snap;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->next_free = 2;      /* skipping the header and first area */
        ps->current_committed = 0;

        r = alloc_area(ps);
        if (r)
                goto bad;

        ps->callback_count = 0;
        atomic_set(&ps->pending_count, 0);
        ps->callbacks = NULL;

        store->destroy = persistent_destroy;
        store->read_metadata = persistent_read_metadata;
        store->prepare_exception = persistent_prepare;
        store->commit_exception = persistent_commit;
        store->drop_snapshot = persistent_drop;
        store->fraction_full = persistent_fraction_full;
        store->context = ps;

        return 0;

 bad:
        dm_io_put(sectors_to_pages(chunk_size));
        if (ps && ps->area)
                free_area(ps);
        kfree(ps);
        return r;
}

/*-----------------------------------------------------------------
 * Implementation of the store for non-persistent snapshots.
 *---------------------------------------------------------------*/
struct transient_c {
        sector_t next_free;
};
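
/*
 * Note that, unlike the persistent store, next_free here is a sector
 * offset into the COW device rather than a chunk index;
 * transient_prepare() converts it with sector_to_chunk() and advances
 * it by chunk_size sectors at a time.
 */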

static void transient_destroy(struct exception_store *store)
{
        kfree(store->context);
}

static int transient_read_metadata(struct exception_store *store)
{
        return 0;
}

static int transient_prepare(struct exception_store *store, struct exception *e)
{
        struct transient_c *tc = (struct transient_c *) store->context;
        sector_t size = get_dev_size(store->snap->cow->bdev);

        if (size < (tc->next_free + store->snap->chunk_size))
                return -1;

        e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
        tc->next_free += store->snap->chunk_size;

        return 0;
}

static void transient_commit(struct exception_store *store,
                             struct exception *e,
                             void (*callback) (void *, int success),
                             void *callback_context)
{
        /* Just succeed */
        callback(callback_context, 1);
}

static void transient_fraction_full(struct exception_store *store,
                                    sector_t *numerator, sector_t *denominator)
{
        *numerator = ((struct transient_c *) store->context)->next_free;
        *denominator = get_dev_size(store->snap->cow->bdev);
}

int dm_create_transient(struct exception_store *store,
                        struct dm_snapshot *s, int blocksize)
{
        struct transient_c *tc;

        memset(store, 0, sizeof(*store));
        store->destroy = transient_destroy;
        store->read_metadata = transient_read_metadata;
        store->prepare_exception = transient_prepare;
        store->commit_exception = transient_commit;
        store->fraction_full = transient_fraction_full;
        store->snap = s;

        tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
        if (!tc)
                return -ENOMEM;

        tc->next_free = 0;
        store->context = tc;

        return 0;
}