dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* Wait for events based on state_bits */
	unsigned long state_bits;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
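
/*
 * Illustrative note (an addition, not part of the original source):
 * chunk_to_sector() is a pure shift. With a 64KiB chunk size,
 * chunk_shift is 7 (64KiB / 512-byte sectors = 128 sectors = 1 << 7):
 *
 *	chunk_to_sector(store, 0) == 0
 *	chunk_to_sector(store, 1) == 128
 *	chunk_to_sector(store, 5) == 640
 *
 * sector_to_chunk(), used in the map functions below, performs the
 * inverse shift and discards the intra-chunk offset.
 */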

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);
	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
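
/*
 * Illustrative note (an addition, not part of the original source):
 * with hash_shift = 3, chunks 0-7 all hash to bucket 0, chunks 8-15 to
 * bucket 1, and so on, so runs of consecutive chunks land on the same
 * bucket list and can later be coalesced by dm_insert_exception().
 * The pending table is created with hash_shift = 0 (see
 * init_hash_tables()) and therefore never groups chunks.
 */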

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
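
/*
 * Illustrative worked example (an addition, not part of the original
 * source): suppose the table already holds {old_chunk = 10,
 * new_chunk = 3, consecutive count = 1}, i.e. old chunks 10-11 map to
 * new chunks 3-4. Inserting {old_chunk = 12, new_chunk = 5} matches the
 * "insert after" test (12 == 10 + 1 + 1 and 5 == 3 + 1 + 1), so the
 * count is bumped to 2 and new_e is freed rather than linked in.
 * Inserting {old_chunk = 9, new_chunk = 2} would instead match the
 * "insert before" test and slide the group's start back by one chunk.
 */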

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	/* FIXME: interlock writes to this chunk */

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static int remove_single_exception_chunk(struct dm_snapshot *s,
					 chunk_t old_chunk)
{
	int r = 0;

	down_write(&s->lock);
	r = __remove_single_exception_chunk(s, old_chunk);
	up_write(&s->lock);

	return r;
}

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int r;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
	if (r <= 0) {
		if (r < 0)
			DMERR("Read error in exception store: "
			      "shutting down merge");
		goto shut;
	}

	/* TODO: use larger I/O size once we verify that kcopyd handles it */

	if (remove_single_exception_chunk(s, old_chunk) < 0)
		goto shut;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min((sector_t)s->store->chunk_size,
			 get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store, 1) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	snapshot_merge_next_chunks(s);
	return;

shut:
	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
	schedule();

	return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
		    TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_requests = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_requests = 2;
		origin_mode = FMODE_WRITE;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	s->suspended = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	ti->private = s;
	ti->num_flush_requests = num_flush_requests;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	kfree(s);

bad:
	return r;
}
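
/*
 * Usage sketch (an addition, not part of the original source; device
 * names are hypothetical): a persistent snapshot with 8-sector (4KiB)
 * chunks over a 409600-sector origin might be created with:
 *
 *	dmsetup create snap --table \
 *		"0 409600 snapshot /dev/vg/base /dev/vg/cow P 8"
 *
 * "P" selects a persistent exception store and "N" a transient one,
 * matching the <p/n> argument parsed above via
 * dm_exception_store_create().
 */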

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	flush_workqueue(ksnapd);

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	free_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector &
					  s->store->chunk_mask);
}
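
/*
 * Illustrative worked example (an addition, not part of the original
 * source): with 128-sector chunks (chunk_shift = 7, chunk_mask = 127)
 * and an exception mapping old chunks 10-12 to new chunks 3-5, a bio
 * for sector 1300 (chunk 10, in-chunk offset 20) is redirected to the
 * COW device at sector (3 + (10 - 10)) * 128 + 20 = 404; the
 * chunk_mask term preserves the offset within the chunk.
 */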

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
			      union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	if (unlikely(bio_empty_barrier(bio))) {
		if (!map_context->flush_request)
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		map_context->ptr = NULL;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_read(&s->lock);

	/* Full snapshots are not usable */
	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_read(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_read(&s->lock);

	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

static void snapshot_postsuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->suspended = 1;
	up_write(&s->lock);
}

static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!snap_src->suspended) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	s->suspended = 0;
	up_write(&s->lock);
}

static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	sector_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->split_io
	 */
	ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}
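
/*
 * Illustrative note (an addition, not part of the original source):
 * for a valid snapshot the STATUSTYPE_INFO branch above emits
 * "<sectors_allocated>/<total_sectors> <metadata_sectors>", so a
 * `dmsetup status` line might end in e.g. "8192/409600 24", while an
 * invalid snapshot reports "Invalid".
 */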
  1410. static int snapshot_iterate_devices(struct dm_target *ti,
  1411. iterate_devices_callout_fn fn, void *data)
  1412. {
  1413. struct dm_snapshot *snap = ti->private;
  1414. return fn(ti, snap->origin, 0, ti->len, data);
  1415. }
  1416. /*-----------------------------------------------------------------
  1417. * Origin methods
  1418. *---------------------------------------------------------------*/
  1419. /*
  1420. * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
  1421. * supplied bio was ignored. The caller may submit it immediately.
  1422. * (No remapping actually occurs as the origin is always a direct linear
  1423. * map.)
  1424. *
  1425. * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
  1426. * and any supplied bio is added to a list to be submitted once all
  1427. * the necessary exceptions exist.
  1428. */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
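			/*
			 * Allocating the pending exception may sleep (it
			 * comes from a mempool), so drop the snapshot lock
			 * first.  After retaking it, the snapshot validity
			 * and both exception tables must be rechecked,
			 * since another writer may have raced in meanwhile.
			 */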
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
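/*
 * As a sketch (the device path and sector count below are placeholders),
 * a table line loading this target looks like:
 *
 *	dmsetup create base --table "0 <num_sectors> snapshot-origin <origin_dev>"
 */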
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}
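
/*
 * Map bios straight to the underlying device.  Empty barriers are
 * remapped unconditionally; writes are first passed to do_origin() so
 * that any snapshots of this origin can create the necessary exceptions
 * before the write reaches the device.
 */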
static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}
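
/*
 * The origin target has no interesting runtime state, so STATUSTYPE_INFO
 * is an empty string; STATUSTYPE_TABLE reports just the origin device
 * name.
 */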
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}
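
/*
 * Three targets are registered below.  The "snapshot" and merge targets
 * share their constructor and destructor but differ in how bios are
 * mapped and in their suspend/resume hooks.  As a sketch (device paths,
 * lengths and chunk size are placeholders), the corresponding table
 * lines look like:
 *
 *	0 <len> snapshot-origin <origin_dev>
 *	0 <len> snapshot <origin_dev> <cow_dev> P <chunk_size>
 *	0 <len> snapshot-merge <origin_dev> <cow_dev> P <chunk_size>
 */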
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
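
/*
 * Module initialisation: bring up the exception store layer, register
 * the three targets, then create the origin hash, the slab caches and
 * the ksnapd workqueue.  On any failure, everything set up so far is
 * torn down in reverse order via the goto ladder below.
 */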
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad_tracked_chunk_cache;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");