dm-snap.c

/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct pending_exception {
	struct exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
/*
 * Slab caches for exception structures and a mempool for
 * pending exceptions.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK 0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}
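
/*
 * Hash an origin by the low byte of its dev_t; with ORIGIN_HASH_SIZE
 * buckets this spreads origins by minor number.
 */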
static inline unsigned int origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
	unsigned int i;

	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et,
				 struct kmem_cache *mem)
{
	struct list_head *slot;
	struct exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh, struct exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static inline void remove_exception(struct exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct exception *lookup_exception(struct exception_table *et,
					  chunk_t chunk)
{
	struct list_head *slot;
	struct exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (e->old_chunk == chunk)
			return e;

	return NULL;
}
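
/*
 * Exceptions are allocated from the I/O path, so a sleeping GFP_NOIO
 * allocation is tried first; on failure we retry with GFP_ATOMIC,
 * which may dip into the emergency reserves rather than sleep.
 */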
static inline struct exception *alloc_exception(void)
{
	struct exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static inline void free_exception(struct exception *e)
{
	kmem_cache_free(exception_cache, e);
}
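
/*
 * Pending exceptions come from a mempool, so with GFP_NOIO the
 * allocation never fails outright: it may block until another
 * pending exception is freed back to the pool.
 */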
static inline struct pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static inline void free_pending_exception(struct pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
	e->new_chunk = new;
	insert_exception(&s->complete, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Rounds a number down to a power of 2.
 */
static inline uint32_t round_down(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
	return n;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Round it down to a power of 2 */
	hash_size = round_down(hash_size);
	if (init_exception_table(&s->complete, hash_size))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static inline ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
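
/*
 * The chunk size argument and s->chunk_size are in units of 512-byte
 * sectors; PAGE_SIZE >> 9 below converts the page size to sectors.
 */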
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (chunk_size & (chunk_size - 1)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			"structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->table = ti->table;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r) {
		ti->error = "Failed to read snapshot metadata";
		goto bad6;
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

bad6:
	kcopyd_client_destroy(s->kcopyd_client);

bad5:
	s->store.destroy(&s->store);

bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

bad2:
	kfree(s);

bad1:
	return r;
}

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	kcopyd_client_destroy(s->kcopyd_client);

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	/* Deallocate memory used */
	s->store.destroy(&s->store);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio, bio->bi_size);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->table);
}

static void get_pending_exception(struct pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct pending_exception *pe)
{
	struct pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

static void pending_complete(struct pending_exception *pe, int success)
{
	struct exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_exception(&s->complete, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
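	/*
	 * The origin may not end on a chunk boundary; clip the copy
	 * to the end of the device so the final, partial chunk is
	 * copied correctly.
	 */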
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct exception *e;
	struct pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}
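
	/*
	 * The lock was dropped around the allocation, so another
	 * thread may have raced in and created the pending
	 * exception for this chunk; look it up again.
	 */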
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}
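
	/*
	 * ref_count starts at 1 here; __origin_write() relies on this
	 * initial reference to keep a primary_pe alive while its
	 * siblings are being set up.
	 */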
	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

out:
	return pe;
}

static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
				   struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
		(bio->bi_sector & s->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct exception *e;
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct exception *e;
	struct pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
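
/*
 * Minimum of two values, but treat zero as "unset" rather than as
 * the smallest value.
 */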
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.status = origin_status,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.resume = snapshot_resume,
	.status = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = kmem_cache_create("dm-snapshot-ex",
					    sizeof(struct exception),
					    __alignof__(struct exception),
					    0, NULL, NULL);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache =
		kmem_cache_create("dm-snapshot-in",
				  sizeof(struct pending_exception),
				  __alignof__(struct pending_exception),
				  0, NULL, NULL);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

bad6:
	mempool_destroy(pending_pool);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();

	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");