
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
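
/*
 * Reads in flight against the origin are tracked in the small
 * tracked_chunk_hash below.  pending_complete() consults it via
 * __chunk_is_tracked() so that a chunk is not remapped into the
 * snapshot while a conflicting read of the same chunk is still
 * outstanding.
 */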
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 *
 * Possible return values and states:
 *   0: NULL, NULL - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
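
/*
 * Example: with hash_shift == 3, chunks 0-7 all hash to bucket 0 and
 * chunks 8-15 to bucket 1.  Consecutive-chunk runs are only coalesced
 * within one such aligned window (see dm_insert_exception() below), so
 * a whole run can always be found by searching a single bucket list.
 */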
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
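
/*
 * Coalescing example: if the table already holds the exception
 * (old_chunk 10 -> new_chunk 20, 0 consecutive) and
 * (old 11 -> new 21) is inserted, the new entry is freed and the
 * existing one becomes (old 10 -> new 20, 1 consecutive), i.e. it
 * now covers chunks 10-11 mapped to 20-21.
 */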
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
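
/*
 * Sizing example (assuming a 64-bit build where
 * sizeof(struct list_head) == 16): calc_max_buckets() allows
 * 2MB / 16 == 131072 buckets.  A 64GB origin with 8-sector (4KB)
 * chunks has 16M chunks, so the completed table is capped at
 * 131072 buckets and the pending table gets 131072 >> 3 == 16384.
 */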
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
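/*
 * For example (illustrative device names), a persistent snapshot of
 * /dev/vg/base with 8-sector chunks might be created with:
 *
 *   echo "0 $(blockdev --getsz /dev/vg/base) \
 *         snapshot /dev/vg/base /dev/vg/cow P 8" | dmsetup create snap
 *
 * where "P" requests a persistent exception store and "N" a
 * transient (non-persistent) one.
 */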
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	s->suspended = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	ti->private = s;
	ti->num_flush_requests = 1;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	flush_workqueue(ksnapd);

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
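
/*
 * Pending exceptions are reference counted.  For a write to the origin,
 * the first pe of the group becomes the primary_pe and holds one
 * reference per sibling snapshot copying the same chunk; the queued
 * origin bios are only released by put_pending_exception() when the
 * last reference is dropped.
 */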
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}
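
/*
 * Remap example: with 8-sector chunks, a bio at sector 83 lies in
 * chunk 10 at offset 3.  If chunk 10 is covered by an exception
 * (old 10 -> new 20), the bio is redirected to the COW device at
 * sector 20 * 8 + 3 == 163.
 */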
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_postsuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->suspended = 1;
	up_write(&s->lock);
}
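
/*
 * Exception handover sequencing: snapshot_preresume() refuses to resume
 * until the handover source is suspended, and snapshot_resume() then
 * performs the actual table/store swap with both snapshots locked (the
 * destination taken with SINGLE_DEPTH_NESTING for lockdep's benefit).
 */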
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!snap_src->suspended) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	s->suspended = 0;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;

	return fn(ti, snap->origin, 0, ti->len, data);
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
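
/*
 * A write to the origin must not complete until every snapshot of that
 * origin has its own copy of the old data.  __origin_write() therefore
 * queues the bio on the group's primary_pe and returns
 * DM_MAPIO_SUBMITTED; the bio is only resubmitted once the last
 * pending copy for this chunk completes.
 */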
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
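/*
 * For example (illustrative device name), an origin target covering
 * all of /dev/vg/base might be created with:
 *
 *   echo "0 $(blockdev --getsz /dev/vg/base) \
 *         snapshot-origin /dev/vg/base" | dmsetup create base-origin
 */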
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	down_read(&_origins_lock);
	ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
	up_read(&_origins_lock);
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");