dm-exception-store.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-snap.h"
#include "dm-io.h"
#include "kcopyd.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots with a different disk version than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
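
/*
 * Illustrative layout derived from the description above ("epa" is
 * shorthand for exceptions_per_area, computed later from the chunk
 * size):
 *
 *	chunk 0				header
 *	chunk 1				metadata area 0
 *	chunks 2 .. epa+1		data chunks described by area 0
 *	chunk epa+2			metadata area 1
 *	chunks epa+3 .. 2*epa+2		data chunks described by area 1
 *	...
 *
 * area_io() and persistent_prepare() below both rely on this stride
 * of epa+1 chunks between consecutive metadata areas.
 */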

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_snapshot *snap;	/* up pointer to my snapshot */
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' refers to.
	 */
	uint32_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	uint32_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
};

static inline unsigned int sectors_to_pages(unsigned int sectors)
{
	return sectors / (PAGE_SIZE >> 9);
}
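
/*
 * For example, with a 4 KiB PAGE_SIZE the divisor above is
 * 4096 >> 9 == 8 sectors per page, so a 16-sector (8 KiB) chunk needs
 * two pages of dm-io buffer.  The page size is architecture dependent,
 * hence the calculation rather than a constant.
 */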

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->snap->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	return 0;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
{
	struct io_region where;
	unsigned long bits;

	where.bdev = ps->snap->cow->bdev;
	where.sector = ps->snap->chunk_size * chunk;
	where.count = ps->snap->chunk_size;

	return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, uint32_t area, int rw)
{
	int r;
	uint32_t chunk;

	/* convert a metadata area index to a chunk index */
	chunk = 1 + ((ps->exceptions_per_area + 1) * area);

	r = chunk_io(ps, chunk, rw);
	if (r)
		return r;

	ps->current_area = area;
	return 0;
}
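
/*
 * Worked example (assuming 4 KiB chunks, i.e. chunk_size == 8 sectors):
 * sizeof(struct disk_exception) is 16 bytes, so exceptions_per_area is
 * 4096 / 16 = 256 and the mapping above gives area 0 -> chunk 1,
 * area 1 -> chunk 258, area 2 -> chunk 515, leaving 256 data chunks
 * between consecutive metadata areas.
 */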

static int zero_area(struct pstore *ps, uint32_t area)
{
	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
	return area_io(ps, area, WRITE);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;

	r = chunk_io(ps, 0, READ);
	if (r)
		return r;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;

	} else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) {
		*new_snapshot = 0;
		ps->valid = le32_to_cpu(dh->valid);
		ps->version = le32_to_cpu(dh->version);
		chunk_size = le32_to_cpu(dh->chunk_size);

		if (ps->snap->chunk_size != chunk_size) {
			DMWARN("chunk size %llu in device metadata overrides "
			       "table chunk size of %llu.",
			       (unsigned long long)chunk_size,
			       (unsigned long long)ps->snap->chunk_size);

			/* We had a bogus chunk_size. Fix stuff up. */
			dm_io_put(sectors_to_pages(ps->snap->chunk_size));
			free_area(ps);

			ps->snap->chunk_size = chunk_size;
			ps->snap->chunk_mask = chunk_size - 1;
			ps->snap->chunk_shift = ffs(chunk_size) - 1;

			r = alloc_area(ps);
			if (r)
				return r;

			r = dm_io_get(sectors_to_pages(chunk_size));
			if (r)
				return r;
		}
	} else {
		DMWARN("Invalid/corrupt snapshot");
		r = -ENXIO;
	}

	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);

	return chunk_io(ps, 0, WRITE);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	if (index >= ps->exceptions_per_area)
		return NULL;

	return ((struct disk_exception *) ps->area) + index;
}

static int read_exception(struct pstore *ps,
			  uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e;

	e = get_exception(ps, index);
	if (!e)
		return -EINVAL;

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);

	return 0;
}

static int write_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e;

	e = get_exception(ps, index);
	if (!e)
		return -EINVAL;

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);

	return 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		r = read_exception(ps, i, &de);
		if (r)
			return r;

		/*
		 * If new_chunk points at the start of the COW
		 * device, where the header chunk lives, we know
		 * that we've hit the end of the exceptions.
		 * Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps)
{
	uint32_t area;
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (area = 0; full; area++) {
		r = area_io(ps, area, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, &full);
		if (r)
			return r;
	}

	return 0;
}

static inline struct pstore *get_info(struct exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->snap->chunk_size;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

static void persistent_destroy(struct exception_store *store)
{
	struct pstore *ps = get_info(store);

	dm_io_put(sectors_to_pages(ps->snap->chunk_size));
	vfree(ps->callbacks);
	free_area(ps);
	kfree(ps);
}

static int persistent_read_metadata(struct exception_store *store)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now that we know the correct chunk_size, complete the
	 * initialisation.
	 */
	ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		r = zero_area(ps, 0);
		if (r) {
			DMWARN("zero_area(0) failed");
			return r;
		}

	} else {
		/*
		 * Sanity checks.
		 */
		if (!ps->valid) {
			DMWARN("snapshot is marked invalid");
			return -EINVAL;
		}

		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps);
		if (r)
			return r;
	}

	return 0;
}

static int persistent_prepare(struct exception_store *store,
			      struct exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->snap->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	if ((++ps->next_free % stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
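
/*
 * Illustration of the skip above (again assuming exceptions_per_area
 * == 256, so stride == 257): chunks whose index is 1 modulo 257
 * (1, 258, 515, ...) hold metadata, so when the post-increment lands
 * on one of them next_free is bumped once more to reach the following
 * data chunk.
 */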

static void persistent_commit(struct exception_store *store,
			      struct exception *e,
			      void (*callback) (void *, int success),
			      void *callback_context)
{
	int r;
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are no more exceptions in flight, or we have
	 * filled this metadata area, we commit the exceptions to
	 * disk.
	 */
	if (atomic_dec_and_test(&ps->pending_count) ||
	    (ps->current_committed == ps->exceptions_per_area)) {
		r = area_io(ps, ps->current_area, WRITE);
		if (r)
			ps->valid = 0;

		for (i = 0; i < ps->callback_count; i++) {
			cb = ps->callbacks + i;
			cb->callback(cb->context, r == 0 ? 1 : 0);
		}

		ps->callback_count = 0;
	}

	/*
	 * Have we completely filled the current area?
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		r = zero_area(ps, ps->current_area + 1);
		if (r)
			ps->valid = 0;
	}
}

static void persistent_drop(struct exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
{
	int r;
	struct pstore *ps;

	r = dm_io_get(sectors_to_pages(chunk_size));
	if (r)
		return r;

	/* allocate the pstore */
	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps) {
		r = -ENOMEM;
		goto bad;
	}

	ps->snap = store->snap;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	r = alloc_area(ps);
	if (r)
		goto bad;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	store->destroy = persistent_destroy;
	store->read_metadata = persistent_read_metadata;
	store->prepare_exception = persistent_prepare;
	store->commit_exception = persistent_commit;
	store->drop_snapshot = persistent_drop;
	store->fraction_full = persistent_fraction_full;
	store->context = ps;

	return 0;

      bad:
	dm_io_put(sectors_to_pages(chunk_size));
	if (ps && ps->area)
		free_area(ps);
	kfree(ps);
	return r;
}

/*-----------------------------------------------------------------
 * Implementation of the store for non-persistent snapshots.
 *---------------------------------------------------------------*/
struct transient_c {
	sector_t next_free;
};

static void transient_destroy(struct exception_store *store)
{
	kfree(store->context);
}

static int transient_read_metadata(struct exception_store *store)
{
	return 0;
}

static int transient_prepare(struct exception_store *store, struct exception *e)
{
	struct transient_c *tc = (struct transient_c *) store->context;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	if (size < (tc->next_free + store->snap->chunk_size))
		return -1;

	e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
	tc->next_free += store->snap->chunk_size;

	return 0;
}
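
/*
 * Note the contrast with persistent_prepare(): the transient store
 * tracks next_free in sectors and hands out consecutive chunks of the
 * COW device starting at chunk 0, since there is no header or metadata
 * area to skip and nothing has to survive a reboot.
 */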

static void transient_commit(struct exception_store *store,
			     struct exception *e,
			     void (*callback) (void *, int success),
			     void *callback_context)
{
	/* Just succeed */
	callback(callback_context, 1);
}

static void transient_fraction_full(struct exception_store *store,
				    sector_t *numerator, sector_t *denominator)
{
	*numerator = ((struct transient_c *) store->context)->next_free;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

int dm_create_transient(struct exception_store *store,
			struct dm_snapshot *s, int blocksize)
{
	struct transient_c *tc;

	memset(store, 0, sizeof(*store));
	store->destroy = transient_destroy;
	store->read_metadata = transient_read_metadata;
	store->prepare_exception = transient_prepare;
	store->commit_exception = transient_commit;
	store->fraction_full = transient_fraction_full;
	store->snap = s;

	tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	tc->next_free = 0;
	store->context = tc;

	return 0;
}
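
/*
 * Usage sketch (an assumption about the caller in dm-snap.c, shown for
 * orientation only): the snapshot constructor is expected to fill in
 * store->snap and then pick one of the two constructors above,
 * depending on whether a persistent or transient store was requested,
 * roughly:
 *
 *	s->store.snap = s;
 *	if (persistent)
 *		r = dm_create_persistent(&s->store, s->chunk_size);
 *	else
 *		r = dm_create_transient(&s->store, s, blocksize);
 *
 * From then on the snapshot code drives the store only through the
 * function pointers set up here: read_metadata, prepare_exception,
 * commit_exception, drop_snapshot, fraction_full and destroy.
 */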