dm-snap.c

  1. /*
  2. * dm-snapshot.c
  3. *
  4. * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  5. *
  6. * This file is released under the GPL.
  7. */
  8. #include <linux/blkdev.h>
  9. #include <linux/device-mapper.h>
  10. #include <linux/delay.h>
  11. #include <linux/fs.h>
  12. #include <linux/init.h>
  13. #include <linux/kdev_t.h>
  14. #include <linux/list.h>
  15. #include <linux/mempool.h>
  16. #include <linux/module.h>
  17. #include <linux/slab.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/log2.h>
  20. #include <linux/dm-kcopyd.h>
  21. #include <linux/workqueue.h>
  22. #include "dm-exception-store.h"
  23. #define DM_MSG_PREFIX "snapshots"
  24. static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
  25. #define dm_target_is_snapshot_merge(ti) \
  26. ((ti)->type->name == dm_snapshot_merge_target_name)
  27. /*
  28. * The percentage increment we will wake up users at
  29. */
  30. #define WAKE_UP_PERCENT 5
  31. /*
  32. * kcopyd priority of snapshot operations
  33. */
  34. #define SNAPSHOT_COPY_PRIORITY 2
  35. /*
  36. * Reserve 1MB for each snapshot initially (with minimum of 1 page).
  37. */
  38. #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
  39. /*
  40. * The size of the mempool used to track chunks in use.
  41. */
  42. #define MIN_IOS 256
  43. #define DM_TRACKED_CHUNK_HASH_SIZE 16
  44. #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
  45. (DM_TRACKED_CHUNK_HASH_SIZE - 1))
  46. struct dm_exception_table {
  47. uint32_t hash_mask;
  48. unsigned hash_shift;
  49. struct list_head *table;
  50. };
  51. struct dm_snapshot {
  52. struct rw_semaphore lock;
  53. struct dm_dev *origin;
  54. struct dm_dev *cow;
  55. struct dm_target *ti;
  56. /* List of snapshots per Origin */
  57. struct list_head list;
  58. /* You can't use a snapshot if this is 0 (e.g. if full) */
  59. int valid;
  60. /* Origin writes don't trigger exceptions until this is set */
  61. int active;
  62. /* Whether or not owning mapped_device is suspended */
  63. int suspended;
  64. mempool_t *pending_pool;
  65. atomic_t pending_exceptions_count;
  66. struct dm_exception_table pending;
  67. struct dm_exception_table complete;
  68. /*
  69. * pe_lock protects all pending_exception operations and access
  70. * as well as the snapshot_bios list.
  71. */
  72. spinlock_t pe_lock;
  73. /* The on disk metadata handler */
  74. struct dm_exception_store *store;
  75. struct dm_kcopyd_client *kcopyd_client;
  76. /* Queue of snapshot writes for ksnapd to flush */
  77. struct bio_list queued_bios;
  78. struct work_struct queued_bios_work;
  79. /* Chunks with outstanding reads */
  80. mempool_t *tracked_chunk_pool;
  81. spinlock_t tracked_chunk_lock;
  82. struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
  83. };
  84. struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
  85. {
  86. return s->cow;
  87. }
  88. EXPORT_SYMBOL(dm_snap_cow);
  89. static struct workqueue_struct *ksnapd;
  90. static void flush_queued_bios(struct work_struct *work);
  91. static sector_t chunk_to_sector(struct dm_exception_store *store,
  92. chunk_t chunk)
  93. {
  94. return chunk << store->chunk_shift;
  95. }
  96. static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
  97. {
  98. /*
  99. * There is only ever one instance of a particular block
  100. * device so we can compare pointers safely.
  101. */
  102. return lhs == rhs;
  103. }
  104. struct dm_snap_pending_exception {
  105. struct dm_exception e;
  106. /*
  107. * Origin buffers waiting for this to complete are held
  108. * in a bio list
  109. */
  110. struct bio_list origin_bios;
  111. struct bio_list snapshot_bios;
  112. /* Pointer back to snapshot context */
  113. struct dm_snapshot *snap;
  114. /*
  115. * 1 indicates the exception has already been sent to
  116. * kcopyd.
  117. */
  118. int started;
  119. };
  120. /*
  121. * Hash table mapping origin volumes to lists of snapshots and
  122. * a lock to protect it
  123. */
  124. static struct kmem_cache *exception_cache;
  125. static struct kmem_cache *pending_cache;
  126. struct dm_snap_tracked_chunk {
  127. struct hlist_node node;
  128. chunk_t chunk;
  129. };
  130. static struct kmem_cache *tracked_chunk_cache;
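/*
 * Record that a read is in flight against the given chunk, so that a
 * pending copy-out can wait for it to drain (see __check_for_conflicting_io).
 */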
  131. static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
  132. chunk_t chunk)
  133. {
  134. struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
  135. GFP_NOIO);
  136. unsigned long flags;
  137. c->chunk = chunk;
  138. spin_lock_irqsave(&s->tracked_chunk_lock, flags);
  139. hlist_add_head(&c->node,
  140. &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
  141. spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
  142. return c;
  143. }
  144. static void stop_tracking_chunk(struct dm_snapshot *s,
  145. struct dm_snap_tracked_chunk *c)
  146. {
  147. unsigned long flags;
  148. spin_lock_irqsave(&s->tracked_chunk_lock, flags);
  149. hlist_del(&c->node);
  150. spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
  151. mempool_free(c, s->tracked_chunk_pool);
  152. }
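/*
 * Returns non-zero if a read is still in flight against the given chunk.
 */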
  153. static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
  154. {
  155. struct dm_snap_tracked_chunk *c;
  156. struct hlist_node *hn;
  157. int found = 0;
  158. spin_lock_irq(&s->tracked_chunk_lock);
  159. hlist_for_each_entry(c, hn,
  160. &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
  161. if (c->chunk == chunk) {
  162. found = 1;
  163. break;
  164. }
  165. }
  166. spin_unlock_irq(&s->tracked_chunk_lock);
  167. return found;
  168. }
  169. /*
  170. * This conflicting I/O is extremely improbable in the caller,
  171. * so msleep(1) is sufficient and there is no need for a wait queue.
  172. */
  173. static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
  174. {
  175. while (__chunk_is_tracked(s, chunk))
  176. msleep(1);
  177. }
  178. /*
  179. * One of these per registered origin, held in the snapshot_origins hash
  180. */
  181. struct origin {
  182. /* The origin device */
  183. struct block_device *bdev;
  184. struct list_head hash_list;
  185. /* List of snapshots for this origin */
  186. struct list_head snapshots;
  187. };
  188. /*
  189. * Size of the hash table for origin volumes. If we make this
  190. * the size of the minors list then it should be nearly perfect
  191. */
  192. #define ORIGIN_HASH_SIZE 256
  193. #define ORIGIN_MASK 0xFF
  194. static struct list_head *_origins;
  195. static struct rw_semaphore _origins_lock;
  196. static int init_origin_hash(void)
  197. {
  198. int i;
  199. _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
  200. GFP_KERNEL);
  201. if (!_origins) {
  202. DMERR("unable to allocate memory");
  203. return -ENOMEM;
  204. }
  205. for (i = 0; i < ORIGIN_HASH_SIZE; i++)
  206. INIT_LIST_HEAD(_origins + i);
  207. init_rwsem(&_origins_lock);
  208. return 0;
  209. }
  210. static void exit_origin_hash(void)
  211. {
  212. kfree(_origins);
  213. }
  214. static unsigned origin_hash(struct block_device *bdev)
  215. {
  216. return bdev->bd_dev & ORIGIN_MASK;
  217. }
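/*
 * _origins_lock must be held when calling the __lookup_origin and
 * __insert_origin helpers below.
 */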
  218. static struct origin *__lookup_origin(struct block_device *origin)
  219. {
  220. struct list_head *ol;
  221. struct origin *o;
  222. ol = &_origins[origin_hash(origin)];
  223. list_for_each_entry (o, ol, hash_list)
  224. if (bdev_equal(o->bdev, origin))
  225. return o;
  226. return NULL;
  227. }
  228. static void __insert_origin(struct origin *o)
  229. {
  230. struct list_head *sl = &_origins[origin_hash(o->bdev)];
  231. list_add_tail(&o->hash_list, sl);
  232. }
  233. /*
  234. * _origins_lock must be held when calling this function.
  235. * Returns number of snapshots registered using the supplied cow device, plus:
  236. * snap_src - a snapshot suitable for use as a source of exception handover
  237. * snap_dest - a snapshot capable of receiving exception handover.
  238. *
  239. * Possible return values and states:
  240. * 0: NULL, NULL - first new snapshot
  241. * 1: snap_src, NULL - normal snapshot
  242. * 2: snap_src, snap_dest - waiting for handover
  243. * 2: snap_src, NULL - handed over, waiting for old to be deleted
  244. * 1: NULL, snap_dest - source got destroyed without handover
  245. */
  246. static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
  247. struct dm_snapshot **snap_src,
  248. struct dm_snapshot **snap_dest)
  249. {
  250. struct dm_snapshot *s;
  251. struct origin *o;
  252. int count = 0;
  253. int active;
  254. o = __lookup_origin(snap->origin->bdev);
  255. if (!o)
  256. goto out;
  257. list_for_each_entry(s, &o->snapshots, list) {
  258. if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
  259. continue;
  260. down_read(&s->lock);
  261. active = s->active;
  262. up_read(&s->lock);
  263. if (active) {
  264. if (snap_src)
  265. *snap_src = s;
  266. } else if (snap_dest)
  267. *snap_dest = s;
  268. count++;
  269. }
  270. out:
  271. return count;
  272. }
  273. /*
  274. * On success, returns 1 if this snapshot is a handover destination,
  275. * otherwise returns 0.
  276. */
  277. static int __validate_exception_handover(struct dm_snapshot *snap)
  278. {
  279. struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
  280. /* Does snapshot need exceptions handed over to it? */
  281. if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) ||
  282. snap_dest) {
  283. snap->ti->error = "Snapshot cow pairing for exception "
  284. "table handover failed";
  285. return -EINVAL;
  286. }
  287. /*
  288. * If no snap_src was found, snap cannot become a handover
  289. * destination.
  290. */
  291. if (!snap_src)
  292. return 0;
  293. return 1;
  294. }
  295. static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
  296. {
  297. struct dm_snapshot *l;
  298. /* Sort the list according to chunk size, largest-first smallest-last */
  299. list_for_each_entry(l, &o->snapshots, list)
  300. if (l->store->chunk_size < s->store->chunk_size)
  301. break;
  302. list_add_tail(&s->list, &l->list);
  303. }
  304. /*
  305. * Make a note of the snapshot and its origin so we can look it
  306. * up when the origin has a write on it.
  307. *
  308. * Also validate snapshot exception store handovers.
  309. * On success, returns 1 if this registration is a handover destination,
  310. * otherwise returns 0.
  311. */
  312. static int register_snapshot(struct dm_snapshot *snap)
  313. {
  314. struct origin *o, *new_o = NULL;
  315. struct block_device *bdev = snap->origin->bdev;
  316. int r = 0;
  317. new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
  318. if (!new_o)
  319. return -ENOMEM;
  320. down_write(&_origins_lock);
  321. r = __validate_exception_handover(snap);
  322. if (r < 0) {
  323. kfree(new_o);
  324. goto out;
  325. }
  326. o = __lookup_origin(bdev);
  327. if (o)
  328. kfree(new_o);
  329. else {
  330. /* New origin */
  331. o = new_o;
  332. /* Initialise the struct */
  333. INIT_LIST_HEAD(&o->snapshots);
  334. o->bdev = bdev;
  335. __insert_origin(o);
  336. }
  337. __insert_snapshot(o, snap);
  338. out:
  339. up_write(&_origins_lock);
  340. return r;
  341. }
  342. /*
  343. * Move snapshot to correct place in list according to chunk size.
  344. */
  345. static void reregister_snapshot(struct dm_snapshot *s)
  346. {
  347. struct block_device *bdev = s->origin->bdev;
  348. down_write(&_origins_lock);
  349. list_del(&s->list);
  350. __insert_snapshot(__lookup_origin(bdev), s);
  351. up_write(&_origins_lock);
  352. }
  353. static void unregister_snapshot(struct dm_snapshot *s)
  354. {
  355. struct origin *o;
  356. down_write(&_origins_lock);
  357. o = __lookup_origin(s->origin->bdev);
  358. list_del(&s->list);
  359. if (o && list_empty(&o->snapshots)) {
  360. list_del(&o->hash_list);
  361. kfree(o);
  362. }
  363. up_write(&_origins_lock);
  364. }
  365. /*
  366. * Implementation of the exception hash tables.
  367. * The lowest hash_shift bits of the chunk number are ignored, allowing
  368. * some consecutive chunks to be grouped together.
  369. */
  370. static int dm_exception_table_init(struct dm_exception_table *et,
  371. uint32_t size, unsigned hash_shift)
  372. {
  373. unsigned int i;
  374. et->hash_shift = hash_shift;
  375. et->hash_mask = size - 1;
  376. et->table = dm_vcalloc(size, sizeof(struct list_head));
  377. if (!et->table)
  378. return -ENOMEM;
  379. for (i = 0; i < size; i++)
  380. INIT_LIST_HEAD(et->table + i);
  381. return 0;
  382. }
  383. static void dm_exception_table_exit(struct dm_exception_table *et,
  384. struct kmem_cache *mem)
  385. {
  386. struct list_head *slot;
  387. struct dm_exception *ex, *next;
  388. int i, size;
  389. size = et->hash_mask + 1;
  390. for (i = 0; i < size; i++) {
  391. slot = et->table + i;
  392. list_for_each_entry_safe (ex, next, slot, hash_list)
  393. kmem_cache_free(mem, ex);
  394. }
  395. vfree(et->table);
  396. }
  397. static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
  398. {
  399. return (chunk >> et->hash_shift) & et->hash_mask;
  400. }
  401. static void dm_remove_exception(struct dm_exception *e)
  402. {
  403. list_del(&e->hash_list);
  404. }
  405. /*
  406. * Return the exception data for a sector, or NULL if not
  407. * remapped.
  408. */
  409. static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
  410. chunk_t chunk)
  411. {
  412. struct list_head *slot;
  413. struct dm_exception *e;
  414. slot = &et->table[exception_hash(et, chunk)];
  415. list_for_each_entry (e, slot, hash_list)
  416. if (chunk >= e->old_chunk &&
  417. chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
  418. return e;
  419. return NULL;
  420. }
  421. static struct dm_exception *alloc_completed_exception(void)
  422. {
  423. struct dm_exception *e;
  424. e = kmem_cache_alloc(exception_cache, GFP_NOIO);
  425. if (!e)
  426. e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
  427. return e;
  428. }
  429. static void free_completed_exception(struct dm_exception *e)
  430. {
  431. kmem_cache_free(exception_cache, e);
  432. }
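/*
 * Pending exceptions are allocated from the per-snapshot mempool;
 * pending_exceptions_count allows snapshot_dtr() to wait for outstanding
 * allocations to drain before tearing the snapshot down.
 */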
  433. static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
  434. {
  435. struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
  436. GFP_NOIO);
  437. atomic_inc(&s->pending_exceptions_count);
  438. pe->snap = s;
  439. return pe;
  440. }
  441. static void free_pending_exception(struct dm_snap_pending_exception *pe)
  442. {
  443. struct dm_snapshot *s = pe->snap;
  444. mempool_free(pe, s->pending_pool);
  445. smp_mb__before_atomic_dec();
  446. atomic_dec(&s->pending_exceptions_count);
  447. }
  448. static void dm_insert_exception(struct dm_exception_table *eh,
  449. struct dm_exception *new_e)
  450. {
  451. struct list_head *l;
  452. struct dm_exception *e = NULL;
  453. l = &eh->table[exception_hash(eh, new_e->old_chunk)];
  454. /* Add immediately if this table doesn't support consecutive chunks */
  455. if (!eh->hash_shift)
  456. goto out;
  457. /* List is ordered by old_chunk */
  458. list_for_each_entry_reverse(e, l, hash_list) {
  459. /* Insert after an existing chunk? */
  460. if (new_e->old_chunk == (e->old_chunk +
  461. dm_consecutive_chunk_count(e) + 1) &&
  462. new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
  463. dm_consecutive_chunk_count(e) + 1)) {
  464. dm_consecutive_chunk_count_inc(e);
  465. free_completed_exception(new_e);
  466. return;
  467. }
  468. /* Insert before an existing chunk? */
  469. if (new_e->old_chunk == (e->old_chunk - 1) &&
  470. new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
  471. dm_consecutive_chunk_count_inc(e);
  472. e->old_chunk--;
  473. e->new_chunk--;
  474. free_completed_exception(new_e);
  475. return;
  476. }
  477. if (new_e->old_chunk > e->old_chunk)
  478. break;
  479. }
  480. out:
  481. list_add(&new_e->hash_list, e ? &e->hash_list : l);
  482. }
  483. /*
  484. * Callback used by the exception stores to load exceptions when
  485. * initialising.
  486. */
  487. static int dm_add_exception(void *context, chunk_t old, chunk_t new)
  488. {
  489. struct dm_snapshot *s = context;
  490. struct dm_exception *e;
  491. e = alloc_completed_exception();
  492. if (!e)
  493. return -ENOMEM;
  494. e->old_chunk = old;
  495. /* Consecutive_count is implicitly initialised to zero */
  496. e->new_chunk = new;
  497. dm_insert_exception(&s->complete, e);
  498. return 0;
  499. }
  500. #define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
  501. /*
  502. * Return a minimum chunk size of all snapshots that have the specified origin.
  503. * Return zero if the origin has no snapshots.
  504. */
  505. static sector_t __minimum_chunk_size(struct origin *o)
  506. {
  507. struct dm_snapshot *snap;
  508. unsigned chunk_size = 0;
  509. if (o)
  510. list_for_each_entry(snap, &o->snapshots, list)
  511. chunk_size = min_not_zero(chunk_size,
  512. snap->store->chunk_size);
  513. return chunk_size;
  514. }
  515. /*
  516. * Hard coded magic.
  517. */
  518. static int calc_max_buckets(void)
  519. {
  520. /* use a fixed size of 2MB */
  521. unsigned long mem = 2 * 1024 * 1024;
  522. mem /= sizeof(struct list_head);
  523. return mem;
  524. }
  525. /*
  526. * Allocate room for a suitable hash table.
  527. */
  528. static int init_hash_tables(struct dm_snapshot *s)
  529. {
  530. sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
  531. /*
  532. * Calculate based on the size of the original volume or
  533. * the COW volume...
  534. */
  535. cow_dev_size = get_dev_size(s->cow->bdev);
  536. origin_dev_size = get_dev_size(s->origin->bdev);
  537. max_buckets = calc_max_buckets();
  538. hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
  539. hash_size = min(hash_size, max_buckets);
  540. if (hash_size < 64)
  541. hash_size = 64;
  542. hash_size = rounddown_pow_of_two(hash_size);
  543. if (dm_exception_table_init(&s->complete, hash_size,
  544. DM_CHUNK_CONSECUTIVE_BITS))
  545. return -ENOMEM;
  546. /*
  547. * Allocate hash table for in-flight exceptions
  548. * Make this smaller than the real hash table
  549. */
  550. hash_size >>= 3;
  551. if (hash_size < 64)
  552. hash_size = 64;
  553. if (dm_exception_table_init(&s->pending, hash_size, 0)) {
  554. dm_exception_table_exit(&s->complete, exception_cache);
  555. return -ENOMEM;
  556. }
  557. return 0;
  558. }
  559. /*
  560. * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  561. */
  562. static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  563. {
  564. struct dm_snapshot *s;
  565. int i;
  566. int r = -EINVAL;
  567. char *origin_path, *cow_path;
  568. unsigned args_used;
  569. if (argc != 4) {
  570. ti->error = "requires exactly 4 arguments";
  571. r = -EINVAL;
  572. goto bad;
  573. }
  574. origin_path = argv[0];
  575. argv++;
  576. argc--;
  577. s = kmalloc(sizeof(*s), GFP_KERNEL);
  578. if (!s) {
  579. ti->error = "Cannot allocate snapshot context private "
  580. "structure";
  581. r = -ENOMEM;
  582. goto bad;
  583. }
  584. cow_path = argv[0];
  585. argv++;
  586. argc--;
  587. r = dm_get_device(ti, cow_path, 0, 0,
  588. FMODE_READ | FMODE_WRITE, &s->cow);
  589. if (r) {
  590. ti->error = "Cannot get COW device";
  591. goto bad_cow;
  592. }
  593. r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
  594. if (r) {
  595. ti->error = "Couldn't create exception store";
  596. r = -EINVAL;
  597. goto bad_store;
  598. }
  599. argv += args_used;
  600. argc -= args_used;
  601. r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
  602. if (r) {
  603. ti->error = "Cannot get origin device";
  604. goto bad_origin;
  605. }
  606. s->ti = ti;
  607. s->valid = 1;
  608. s->active = 0;
  609. s->suspended = 0;
  610. atomic_set(&s->pending_exceptions_count, 0);
  611. init_rwsem(&s->lock);
  612. INIT_LIST_HEAD(&s->list);
  613. spin_lock_init(&s->pe_lock);
  614. /* Allocate hash table for COW data */
  615. if (init_hash_tables(s)) {
  616. ti->error = "Unable to allocate hash table space";
  617. r = -ENOMEM;
  618. goto bad_hash_tables;
  619. }
  620. r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
  621. if (r) {
  622. ti->error = "Could not create kcopyd client";
  623. goto bad_kcopyd;
  624. }
  625. s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
  626. if (!s->pending_pool) {
  627. ti->error = "Could not allocate mempool for pending exceptions";
  628. goto bad_pending_pool;
  629. }
  630. s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
  631. tracked_chunk_cache);
  632. if (!s->tracked_chunk_pool) {
  633. ti->error = "Could not allocate tracked_chunk mempool for "
  634. "tracking reads";
  635. goto bad_tracked_chunk_pool;
  636. }
  637. for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
  638. INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
  639. spin_lock_init(&s->tracked_chunk_lock);
  640. bio_list_init(&s->queued_bios);
  641. INIT_WORK(&s->queued_bios_work, flush_queued_bios);
  642. ti->private = s;
  643. ti->num_flush_requests = 1;
  644. /* Add snapshot to the list of snapshots for this origin */
  645. /* Exceptions aren't triggered till snapshot_resume() is called */
  646. r = register_snapshot(s);
  647. if (r == -ENOMEM) {
  648. ti->error = "Snapshot origin struct allocation failed";
  649. goto bad_load_and_register;
  650. } else if (r < 0) {
  651. /* invalid handover, register_snapshot has set ti->error */
  652. goto bad_load_and_register;
  653. }
  654. /*
  655. * Metadata must only be loaded into one table at once, so skip this
  656. * if metadata will be handed over during resume.
  657. * Chunk size will be set during the handover - set it to zero to
  658. * ensure it's ignored.
  659. */
  660. if (r > 0) {
  661. s->store->chunk_size = 0;
  662. return 0;
  663. }
  664. r = s->store->type->read_metadata(s->store, dm_add_exception,
  665. (void *)s);
  666. if (r < 0) {
  667. ti->error = "Failed to read snapshot metadata";
  668. goto bad_read_metadata;
  669. } else if (r > 0) {
  670. s->valid = 0;
  671. DMWARN("Snapshot is marked invalid.");
  672. }
  673. if (!s->store->chunk_size) {
  674. ti->error = "Chunk size not set";
  675. goto bad_read_metadata;
  676. }
  677. ti->split_io = s->store->chunk_size;
  678. return 0;
  679. bad_read_metadata:
  680. unregister_snapshot(s);
  681. bad_load_and_register:
  682. mempool_destroy(s->tracked_chunk_pool);
  683. bad_tracked_chunk_pool:
  684. mempool_destroy(s->pending_pool);
  685. bad_pending_pool:
  686. dm_kcopyd_client_destroy(s->kcopyd_client);
  687. bad_kcopyd:
  688. dm_exception_table_exit(&s->pending, pending_cache);
  689. dm_exception_table_exit(&s->complete, exception_cache);
  690. bad_hash_tables:
  691. dm_put_device(ti, s->origin);
  692. bad_origin:
  693. dm_exception_store_destroy(s->store);
  694. bad_store:
  695. dm_put_device(ti, s->cow);
  696. bad_cow:
  697. kfree(s);
  698. bad:
  699. return r;
  700. }
  701. static void __free_exceptions(struct dm_snapshot *s)
  702. {
  703. dm_kcopyd_client_destroy(s->kcopyd_client);
  704. s->kcopyd_client = NULL;
  705. dm_exception_table_exit(&s->pending, pending_cache);
  706. dm_exception_table_exit(&s->complete, exception_cache);
  707. }
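/*
 * Transfer the completed exception table and exception store from snap_src
 * to snap_dest. Both snapshot locks must be held (see snapshot_resume).
 */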
  708. static void __handover_exceptions(struct dm_snapshot *snap_src,
  709. struct dm_snapshot *snap_dest)
  710. {
  711. union {
  712. struct dm_exception_table table_swap;
  713. struct dm_exception_store *store_swap;
  714. } u;
  715. /*
  716. * Swap all snapshot context information between the two instances.
  717. */
  718. u.table_swap = snap_dest->complete;
  719. snap_dest->complete = snap_src->complete;
  720. snap_src->complete = u.table_swap;
  721. u.store_swap = snap_dest->store;
  722. snap_dest->store = snap_src->store;
  723. snap_src->store = u.store_swap;
  724. snap_dest->store->snap = snap_dest;
  725. snap_src->store->snap = snap_src;
  726. snap_dest->ti->split_io = snap_dest->store->chunk_size;
  727. snap_dest->valid = snap_src->valid;
  728. /*
  729. * Set source invalid to ensure it receives no further I/O.
  730. */
  731. snap_src->valid = 0;
  732. }
  733. static void snapshot_dtr(struct dm_target *ti)
  734. {
  735. #ifdef CONFIG_DM_DEBUG
  736. int i;
  737. #endif
  738. struct dm_snapshot *s = ti->private;
  739. struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
  740. flush_workqueue(ksnapd);
  741. down_read(&_origins_lock);
  742. /* Check whether exception handover must be cancelled */
  743. (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
  744. if (snap_src && snap_dest && (s == snap_src)) {
  745. down_write(&snap_dest->lock);
  746. snap_dest->valid = 0;
  747. up_write(&snap_dest->lock);
  748. DMERR("Cancelling snapshot handover.");
  749. }
  750. up_read(&_origins_lock);
  751. /* Prevent further origin writes from using this snapshot. */
  752. /* After this returns there can be no new kcopyd jobs. */
  753. unregister_snapshot(s);
  754. while (atomic_read(&s->pending_exceptions_count))
  755. msleep(1);
  756. /*
  757. * Ensure instructions in mempool_destroy aren't reordered
  758. * before atomic_read.
  759. */
  760. smp_mb();
  761. #ifdef CONFIG_DM_DEBUG
  762. for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
  763. BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
  764. #endif
  765. mempool_destroy(s->tracked_chunk_pool);
  766. __free_exceptions(s);
  767. mempool_destroy(s->pending_pool);
  768. dm_put_device(ti, s->origin);
  769. dm_exception_store_destroy(s->store);
  770. dm_put_device(ti, s->cow);
  771. kfree(s);
  772. }
  773. /*
  774. * Flush a list of buffers.
  775. */
  776. static void flush_bios(struct bio *bio)
  777. {
  778. struct bio *n;
  779. while (bio) {
  780. n = bio->bi_next;
  781. bio->bi_next = NULL;
  782. generic_make_request(bio);
  783. bio = n;
  784. }
  785. }
  786. static void flush_queued_bios(struct work_struct *work)
  787. {
  788. struct dm_snapshot *s =
  789. container_of(work, struct dm_snapshot, queued_bios_work);
  790. struct bio *queued_bios;
  791. unsigned long flags;
  792. spin_lock_irqsave(&s->pe_lock, flags);
  793. queued_bios = bio_list_get(&s->queued_bios);
  794. spin_unlock_irqrestore(&s->pe_lock, flags);
  795. flush_bios(queued_bios);
  796. }
  797. static int do_origin(struct dm_dev *origin, struct bio *bio);
  798. /*
  799. * Flush a list of buffers.
  800. */
  801. static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
  802. {
  803. struct bio *n;
  804. int r;
  805. while (bio) {
  806. n = bio->bi_next;
  807. bio->bi_next = NULL;
  808. r = do_origin(s->origin, bio);
  809. if (r == DM_MAPIO_REMAPPED)
  810. generic_make_request(bio);
  811. bio = n;
  812. }
  813. }
  814. /*
  815. * Error a list of buffers.
  816. */
  817. static void error_bios(struct bio *bio)
  818. {
  819. struct bio *n;
  820. while (bio) {
  821. n = bio->bi_next;
  822. bio->bi_next = NULL;
  823. bio_io_error(bio);
  824. bio = n;
  825. }
  826. }
  827. static void __invalidate_snapshot(struct dm_snapshot *s, int err)
  828. {
  829. if (!s->valid)
  830. return;
  831. if (err == -EIO)
  832. DMERR("Invalidating snapshot: Error reading/writing.");
  833. else if (err == -ENOMEM)
  834. DMERR("Invalidating snapshot: Unable to allocate exception.");
  835. if (s->store->type->drop_snapshot)
  836. s->store->type->drop_snapshot(s->store);
  837. s->valid = 0;
  838. dm_table_event(s->ti->table);
  839. }
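/*
 * Complete a pending exception: on success move it into the completed
 * table, otherwise invalidate the snapshot, then release the bios that
 * were waiting on the copy.
 */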
  840. static void pending_complete(struct dm_snap_pending_exception *pe, int success)
  841. {
  842. struct dm_exception *e;
  843. struct dm_snapshot *s = pe->snap;
  844. struct bio *origin_bios = NULL;
  845. struct bio *snapshot_bios = NULL;
  846. int error = 0;
  847. if (!success) {
  848. /* Read/write error - snapshot is unusable */
  849. down_write(&s->lock);
  850. __invalidate_snapshot(s, -EIO);
  851. error = 1;
  852. goto out;
  853. }
  854. e = alloc_completed_exception();
  855. if (!e) {
  856. down_write(&s->lock);
  857. __invalidate_snapshot(s, -ENOMEM);
  858. error = 1;
  859. goto out;
  860. }
  861. *e = pe->e;
  862. down_write(&s->lock);
  863. if (!s->valid) {
  864. free_completed_exception(e);
  865. error = 1;
  866. goto out;
  867. }
  868. /* Check for conflicting reads */
  869. __check_for_conflicting_io(s, pe->e.old_chunk);
  870. /*
  871. * Add a proper exception, and remove the
  872. * in-flight exception from the list.
  873. */
  874. dm_insert_exception(&s->complete, e);
  875. out:
  876. dm_remove_exception(&pe->e);
  877. snapshot_bios = bio_list_get(&pe->snapshot_bios);
  878. origin_bios = bio_list_get(&pe->origin_bios);
  879. free_pending_exception(pe);
  880. up_write(&s->lock);
  881. /* Submit any pending write bios */
  882. if (error)
  883. error_bios(snapshot_bios);
  884. else
  885. flush_bios(snapshot_bios);
  886. retry_origin_bios(s, origin_bios);
  887. }
  888. static void commit_callback(void *context, int success)
  889. {
  890. struct dm_snap_pending_exception *pe = context;
  891. pending_complete(pe, success);
  892. }
  893. /*
  894. * Called when the copy I/O has finished. kcopyd actually runs
  895. * this code so don't block.
  896. */
  897. static void copy_callback(int read_err, unsigned long write_err, void *context)
  898. {
  899. struct dm_snap_pending_exception *pe = context;
  900. struct dm_snapshot *s = pe->snap;
  901. if (read_err || write_err)
  902. pending_complete(pe, 0);
  903. else
  904. /* Update the metadata if we are persistent */
  905. s->store->type->commit_exception(s->store, &pe->e,
  906. commit_callback, pe);
  907. }
  908. /*
  909. * Dispatches the copy operation to kcopyd.
  910. */
  911. static void start_copy(struct dm_snap_pending_exception *pe)
  912. {
  913. struct dm_snapshot *s = pe->snap;
  914. struct dm_io_region src, dest;
  915. struct block_device *bdev = s->origin->bdev;
  916. sector_t dev_size;
  917. dev_size = get_dev_size(bdev);
  918. src.bdev = bdev;
  919. src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
  920. src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
  921. dest.bdev = s->cow->bdev;
  922. dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
  923. dest.count = src.count;
  924. /* Hand over to kcopyd */
  925. dm_kcopyd_copy(s->kcopyd_client,
  926. &src, 1, &dest, 0, copy_callback, pe);
  927. }
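/*
 * Look up an in-flight (pending) exception for this chunk.
 * The snapshot lock must be held.
 */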
  928. static struct dm_snap_pending_exception *
  929. __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
  930. {
  931. struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
  932. if (!e)
  933. return NULL;
  934. return container_of(e, struct dm_snap_pending_exception, e);
  935. }
  936. /*
  937. * Looks to see if this snapshot already has a pending exception
  938. * for this chunk, otherwise it allocates a new one and inserts
  939. * it into the pending table.
  940. *
  941. * NOTE: a write lock must be held on snap->lock before calling
  942. * this.
  943. */
  944. static struct dm_snap_pending_exception *
  945. __find_pending_exception(struct dm_snapshot *s,
  946. struct dm_snap_pending_exception *pe, chunk_t chunk)
  947. {
  948. struct dm_snap_pending_exception *pe2;
  949. pe2 = __lookup_pending_exception(s, chunk);
  950. if (pe2) {
  951. free_pending_exception(pe);
  952. return pe2;
  953. }
  954. pe->e.old_chunk = chunk;
  955. bio_list_init(&pe->origin_bios);
  956. bio_list_init(&pe->snapshot_bios);
  957. pe->started = 0;
  958. if (s->store->type->prepare_exception(s->store, &pe->e)) {
  959. free_pending_exception(pe);
  960. return NULL;
  961. }
  962. dm_insert_exception(&s->pending, &pe->e);
  963. return pe;
  964. }
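/*
 * Redirect a bio to the COW device at the location given by the exception,
 * preserving the offset within the chunk.
 */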
  965. static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
  966. struct bio *bio, chunk_t chunk)
  967. {
  968. bio->bi_bdev = s->cow->bdev;
  969. bio->bi_sector = chunk_to_sector(s->store,
  970. dm_chunk_number(e->new_chunk) +
  971. (chunk - e->old_chunk)) +
  972. (bio->bi_sector &
  973. s->store->chunk_mask);
  974. }
  975. static int snapshot_map(struct dm_target *ti, struct bio *bio,
  976. union map_info *map_context)
  977. {
  978. struct dm_exception *e;
  979. struct dm_snapshot *s = ti->private;
  980. int r = DM_MAPIO_REMAPPED;
  981. chunk_t chunk;
  982. struct dm_snap_pending_exception *pe = NULL;
  983. if (unlikely(bio_empty_barrier(bio))) {
  984. bio->bi_bdev = s->cow->bdev;
  985. return DM_MAPIO_REMAPPED;
  986. }
  987. chunk = sector_to_chunk(s->store, bio->bi_sector);
  988. /* Full snapshots are not usable */
  989. /* To get here the table must be live so s->active is always set. */
  990. if (!s->valid)
  991. return -EIO;
  992. /* FIXME: should only take write lock if we need
  993. * to copy an exception */
  994. down_write(&s->lock);
  995. if (!s->valid) {
  996. r = -EIO;
  997. goto out_unlock;
  998. }
  999. /* If the block is already remapped - use that, else remap it */
  1000. e = dm_lookup_exception(&s->complete, chunk);
  1001. if (e) {
  1002. remap_exception(s, e, bio, chunk);
  1003. goto out_unlock;
  1004. }
  1005. /*
  1006. * Write to snapshot - higher level takes care of RW/RO
  1007. * flags so we should only get this if we are
  1008. * writeable.
  1009. */
  1010. if (bio_rw(bio) == WRITE) {
  1011. pe = __lookup_pending_exception(s, chunk);
  1012. if (!pe) {
  1013. up_write(&s->lock);
  1014. pe = alloc_pending_exception(s);
  1015. down_write(&s->lock);
  1016. if (!s->valid) {
  1017. free_pending_exception(pe);
  1018. r = -EIO;
  1019. goto out_unlock;
  1020. }
  1021. e = dm_lookup_exception(&s->complete, chunk);
  1022. if (e) {
  1023. free_pending_exception(pe);
  1024. remap_exception(s, e, bio, chunk);
  1025. goto out_unlock;
  1026. }
  1027. pe = __find_pending_exception(s, pe, chunk);
  1028. if (!pe) {
  1029. __invalidate_snapshot(s, -ENOMEM);
  1030. r = -EIO;
  1031. goto out_unlock;
  1032. }
  1033. }
  1034. remap_exception(s, &pe->e, bio, chunk);
  1035. bio_list_add(&pe->snapshot_bios, bio);
  1036. r = DM_MAPIO_SUBMITTED;
  1037. if (!pe->started) {
  1038. /* this is protected by snap->lock */
  1039. pe->started = 1;
  1040. up_write(&s->lock);
  1041. start_copy(pe);
  1042. goto out;
  1043. }
  1044. } else {
  1045. bio->bi_bdev = s->origin->bdev;
  1046. map_context->ptr = track_chunk(s, chunk);
  1047. }
  1048. out_unlock:
  1049. up_write(&s->lock);
  1050. out:
  1051. return r;
  1052. }
  1053. /*
  1054. * A snapshot-merge target behaves like a combination of a snapshot
  1055. * target and a snapshot-origin target. It only generates new
  1056. * exceptions in other snapshots and not in the one that is being
  1057. * merged.
  1058. *
  1059. * For each chunk, if there is an existing exception, it is used to
  1060. * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
  1061. * which in turn might generate exceptions in other snapshots.
  1062. */
  1063. static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
  1064. union map_info *map_context)
  1065. {
  1066. struct dm_exception *e;
  1067. struct dm_snapshot *s = ti->private;
  1068. int r = DM_MAPIO_REMAPPED;
  1069. chunk_t chunk;
  1070. chunk = sector_to_chunk(s->store, bio->bi_sector);
  1071. down_read(&s->lock);
  1072. /* Full snapshots are not usable */
  1073. if (!s->valid) {
  1074. r = -EIO;
  1075. goto out_unlock;
  1076. }
  1077. /* If the block is already remapped - use that */
  1078. e = dm_lookup_exception(&s->complete, chunk);
  1079. if (e) {
  1080. remap_exception(s, e, bio, chunk);
  1081. goto out_unlock;
  1082. }
  1083. bio->bi_bdev = s->origin->bdev;
  1084. if (bio_rw(bio) == WRITE) {
  1085. up_read(&s->lock);
  1086. return do_origin(s->origin, bio);
  1087. }
  1088. out_unlock:
  1089. up_read(&s->lock);
  1090. return r;
  1091. }
  1092. static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
  1093. int error, union map_info *map_context)
  1094. {
  1095. struct dm_snapshot *s = ti->private;
  1096. struct dm_snap_tracked_chunk *c = map_context->ptr;
  1097. if (c)
  1098. stop_tracking_chunk(s, c);
  1099. return 0;
  1100. }
  1101. static void snapshot_postsuspend(struct dm_target *ti)
  1102. {
  1103. struct dm_snapshot *s = ti->private;
  1104. down_write(&s->lock);
  1105. s->suspended = 1;
  1106. up_write(&s->lock);
  1107. }
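/*
 * Refuse to resume until any exception handover can proceed safely:
 * the handover source must not resume, and must already be suspended
 * before the destination can take over.
 */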
  1108. static int snapshot_preresume(struct dm_target *ti)
  1109. {
  1110. int r = 0;
  1111. struct dm_snapshot *s = ti->private;
  1112. struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
  1113. down_read(&_origins_lock);
  1114. (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
  1115. if (snap_src && snap_dest) {
  1116. down_read(&snap_src->lock);
  1117. if (s == snap_src) {
  1118. DMERR("Unable to resume snapshot source until "
  1119. "handover completes.");
  1120. r = -EINVAL;
  1121. } else if (!snap_src->suspended) {
  1122. DMERR("Unable to perform snapshot handover until "
  1123. "source is suspended.");
  1124. r = -EINVAL;
  1125. }
  1126. up_read(&snap_src->lock);
  1127. }
  1128. up_read(&_origins_lock);
  1129. return r;
  1130. }
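/*
 * If this snapshot is the destination of an exception handover, take the
 * exception store over from the source before marking the snapshot active.
 */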
  1131. static void snapshot_resume(struct dm_target *ti)
  1132. {
  1133. struct dm_snapshot *s = ti->private;
  1134. struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
  1135. down_read(&_origins_lock);
  1136. (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
  1137. if (snap_src && snap_dest) {
  1138. down_write(&snap_src->lock);
  1139. down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
  1140. __handover_exceptions(snap_src, snap_dest);
  1141. up_write(&snap_dest->lock);
  1142. up_write(&snap_src->lock);
  1143. }
  1144. up_read(&_origins_lock);
  1145. /* Now we have correct chunk size, reregister */
  1146. reregister_snapshot(s);
  1147. down_write(&s->lock);
  1148. s->active = 1;
  1149. s->suspended = 0;
  1150. up_write(&s->lock);
  1151. }
  1152. static int snapshot_status(struct dm_target *ti, status_type_t type,
  1153. char *result, unsigned int maxlen)
  1154. {
  1155. unsigned sz = 0;
  1156. struct dm_snapshot *snap = ti->private;
  1157. switch (type) {
  1158. case STATUSTYPE_INFO:
  1159. down_write(&snap->lock);
  1160. if (!snap->valid)
  1161. DMEMIT("Invalid");
  1162. else {
  1163. if (snap->store->type->usage) {
  1164. sector_t total_sectors, sectors_allocated,
  1165. metadata_sectors;
  1166. snap->store->type->usage(snap->store,
  1167. &total_sectors,
  1168. &sectors_allocated,
  1169. &metadata_sectors);
  1170. DMEMIT("%llu/%llu %llu",
  1171. (unsigned long long)sectors_allocated,
  1172. (unsigned long long)total_sectors,
  1173. (unsigned long long)metadata_sectors);
  1174. }
  1175. else
  1176. DMEMIT("Unknown");
  1177. }
  1178. up_write(&snap->lock);
  1179. break;
  1180. case STATUSTYPE_TABLE:
  1181. /*
  1182. * kdevname returns a static pointer so we need
  1183. * to make private copies if the output is to
  1184. * make sense.
  1185. */
  1186. DMEMIT("%s %s", snap->origin->name, snap->cow->name);
  1187. snap->store->type->status(snap->store, type, result + sz,
  1188. maxlen - sz);
  1189. break;
  1190. }
  1191. return 0;
  1192. }
  1193. static int snapshot_iterate_devices(struct dm_target *ti,
  1194. iterate_devices_callout_fn fn, void *data)
  1195. {
  1196. struct dm_snapshot *snap = ti->private;
  1197. return fn(ti, snap->origin, 0, ti->len, data);
  1198. }
  1199. /*-----------------------------------------------------------------
  1200. * Origin methods
  1201. *---------------------------------------------------------------*/
  1202. /*
  1203. * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
  1204. * supplied bio was ignored. The caller may submit it immediately.
  1205. * (No remapping actually occurs as the origin is always a direct linear
  1206. * map.)
  1207. *
  1208. * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
  1209. * and any supplied bio is added to a list to be submitted once all
  1210. * the necessary exceptions exist.
  1211. */
  1212. static int __origin_write(struct list_head *snapshots, sector_t sector,
  1213. struct bio *bio)
  1214. {
  1215. int r = DM_MAPIO_REMAPPED;
  1216. struct dm_snapshot *snap;
  1217. struct dm_exception *e;
  1218. struct dm_snap_pending_exception *pe;
  1219. struct dm_snap_pending_exception *pe_to_start_now = NULL;
  1220. struct dm_snap_pending_exception *pe_to_start_last = NULL;
  1221. chunk_t chunk;
  1222. /* Do all the snapshots on this origin */
  1223. list_for_each_entry (snap, snapshots, list) {
  1224. /*
  1225. * Don't make new exceptions in a merging snapshot
  1226. * because it has effectively been deleted
  1227. */
  1228. if (dm_target_is_snapshot_merge(snap->ti))
  1229. continue;
  1230. down_write(&snap->lock);
  1231. /* Only deal with valid and active snapshots */
  1232. if (!snap->valid || !snap->active)
  1233. goto next_snapshot;
  1234. /* Nothing to do if writing beyond end of snapshot */
  1235. if (sector >= dm_table_get_size(snap->ti->table))
  1236. goto next_snapshot;
  1237. /*
  1238. * Remember, different snapshots can have
  1239. * different chunk sizes.
  1240. */
  1241. chunk = sector_to_chunk(snap->store, sector);
  1242. /*
  1243. * Check exception table to see if block
  1244. * is already remapped in this snapshot
  1245. * and trigger an exception if not.
  1246. */
  1247. e = dm_lookup_exception(&snap->complete, chunk);
  1248. if (e)
  1249. goto next_snapshot;
  1250. pe = __lookup_pending_exception(snap, chunk);
  1251. if (!pe) {
  1252. up_write(&snap->lock);
  1253. pe = alloc_pending_exception(snap);
  1254. down_write(&snap->lock);
  1255. if (!snap->valid) {
  1256. free_pending_exception(pe);
  1257. goto next_snapshot;
  1258. }
  1259. e = dm_lookup_exception(&snap->complete, chunk);
  1260. if (e) {
  1261. free_pending_exception(pe);
  1262. goto next_snapshot;
  1263. }
  1264. pe = __find_pending_exception(snap, pe, chunk);
  1265. if (!pe) {
  1266. __invalidate_snapshot(snap, -ENOMEM);
  1267. goto next_snapshot;
  1268. }
  1269. }
  1270. r = DM_MAPIO_SUBMITTED;
  1271. /*
  1272. * If an origin bio was supplied, queue it to wait for the
  1273. * completion of this exception, and start this one last,
  1274. * at the end of the function.
  1275. */
  1276. if (bio) {
  1277. bio_list_add(&pe->origin_bios, bio);
  1278. bio = NULL;
  1279. if (!pe->started) {
  1280. pe->started = 1;
  1281. pe_to_start_last = pe;
  1282. }
  1283. }
  1284. if (!pe->started) {
  1285. pe->started = 1;
  1286. pe_to_start_now = pe;
  1287. }
  1288. next_snapshot:
  1289. up_write(&snap->lock);
  1290. if (pe_to_start_now) {
  1291. start_copy(pe_to_start_now);
  1292. pe_to_start_now = NULL;
  1293. }
  1294. }
  1295. /*
  1296. * Submit the exception against which the bio is queued last,
  1297. * to give the other exceptions a head start.
  1298. */
  1299. if (pe_to_start_last)
  1300. start_copy(pe_to_start_last);
  1301. return r;
  1302. }
  1303. /*
  1304. * Called on a write from the origin driver.
  1305. */
  1306. static int do_origin(struct dm_dev *origin, struct bio *bio)
  1307. {
  1308. struct origin *o;
  1309. int r = DM_MAPIO_REMAPPED;
  1310. down_read(&_origins_lock);
  1311. o = __lookup_origin(origin->bdev);
  1312. if (o)
  1313. r = __origin_write(&o->snapshots, bio->bi_sector, bio);
  1314. up_read(&_origins_lock);
  1315. return r;
  1316. }
  1317. /*
  1318. * Origin: maps a linear range of a device, with hooks for snapshotting.
  1319. */
  1320. /*
  1321. * Construct an origin mapping: <dev_path>
  1322. * The context for an origin is merely a 'struct dm_dev *'
  1323. * pointing to the real device.
  1324. */
  1325. static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  1326. {
  1327. int r;
  1328. struct dm_dev *dev;
  1329. if (argc != 1) {
  1330. ti->error = "origin: incorrect number of arguments";
  1331. return -EINVAL;
  1332. }
  1333. r = dm_get_device(ti, argv[0], 0, ti->len,
  1334. dm_table_get_mode(ti->table), &dev);
  1335. if (r) {
  1336. ti->error = "Cannot get target device";
  1337. return r;
  1338. }
  1339. ti->private = dev;
  1340. ti->num_flush_requests = 1;
  1341. return 0;
  1342. }
  1343. static void origin_dtr(struct dm_target *ti)
  1344. {
  1345. struct dm_dev *dev = ti->private;
  1346. dm_put_device(ti, dev);
  1347. }
  1348. static int origin_map(struct dm_target *ti, struct bio *bio,
  1349. union map_info *map_context)
  1350. {
  1351. struct dm_dev *dev = ti->private;
  1352. bio->bi_bdev = dev->bdev;
  1353. if (unlikely(bio_empty_barrier(bio)))
  1354. return DM_MAPIO_REMAPPED;
  1355. /* Only tell snapshots if this is a write */
  1356. return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
  1357. }
  1358. /*
  1359. * Set the target "split_io" field to the minimum of all the snapshots'
  1360. * chunk sizes.
  1361. */
  1362. static void origin_resume(struct dm_target *ti)
  1363. {
  1364. struct dm_dev *dev = ti->private;
  1365. down_read(&_origins_lock);
  1366. ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
  1367. up_read(&_origins_lock);
  1368. }
  1369. static int origin_status(struct dm_target *ti, status_type_t type, char *result,
  1370. unsigned int maxlen)
  1371. {
  1372. struct dm_dev *dev = ti->private;
  1373. switch (type) {
  1374. case STATUSTYPE_INFO:
  1375. result[0] = '\0';
  1376. break;
  1377. case STATUSTYPE_TABLE:
  1378. snprintf(result, maxlen, "%s", dev->name);
  1379. break;
  1380. }
  1381. return 0;
  1382. }
  1383. static int origin_iterate_devices(struct dm_target *ti,
  1384. iterate_devices_callout_fn fn, void *data)
  1385. {
  1386. struct dm_dev *dev = ti->private;
  1387. return fn(ti, dev, 0, ti->len, data);
  1388. }
  1389. static struct target_type origin_target = {
  1390. .name = "snapshot-origin",
  1391. .version = {1, 7, 0},
  1392. .module = THIS_MODULE,
  1393. .ctr = origin_ctr,
  1394. .dtr = origin_dtr,
  1395. .map = origin_map,
  1396. .resume = origin_resume,
  1397. .status = origin_status,
  1398. .iterate_devices = origin_iterate_devices,
  1399. };
  1400. static struct target_type snapshot_target = {
  1401. .name = "snapshot",
  1402. .version = {1, 9, 0},
  1403. .module = THIS_MODULE,
  1404. .ctr = snapshot_ctr,
  1405. .dtr = snapshot_dtr,
  1406. .map = snapshot_map,
  1407. .end_io = snapshot_end_io,
  1408. .postsuspend = snapshot_postsuspend,
  1409. .preresume = snapshot_preresume,
  1410. .resume = snapshot_resume,
  1411. .status = snapshot_status,
  1412. .iterate_devices = snapshot_iterate_devices,
  1413. };
  1414. static struct target_type merge_target = {
  1415. .name = dm_snapshot_merge_target_name,
  1416. .version = {1, 0, 0},
  1417. .module = THIS_MODULE,
  1418. .ctr = snapshot_ctr,
  1419. .dtr = snapshot_dtr,
  1420. .map = snapshot_merge_map,
  1421. .end_io = snapshot_end_io,
  1422. .postsuspend = snapshot_postsuspend,
  1423. .preresume = snapshot_preresume,
  1424. .resume = snapshot_resume,
  1425. .status = snapshot_status,
  1426. .iterate_devices = snapshot_iterate_devices,
  1427. };
  1428. static int __init dm_snapshot_init(void)
  1429. {
  1430. int r;
  1431. r = dm_exception_store_init();
  1432. if (r) {
  1433. DMERR("Failed to initialize exception stores");
  1434. return r;
  1435. }
  1436. r = dm_register_target(&snapshot_target);
  1437. if (r < 0) {
  1438. DMERR("snapshot target register failed %d", r);
  1439. goto bad_register_snapshot_target;
  1440. }
  1441. r = dm_register_target(&origin_target);
  1442. if (r < 0) {
  1443. DMERR("Origin target register failed %d", r);
  1444. goto bad_register_origin_target;
  1445. }
  1446. r = dm_register_target(&merge_target);
  1447. if (r < 0) {
  1448. DMERR("Merge target register failed %d", r);
  1449. goto bad_register_merge_target;
  1450. }
  1451. r = init_origin_hash();
  1452. if (r) {
  1453. DMERR("init_origin_hash failed.");
  1454. goto bad_origin_hash;
  1455. }
  1456. exception_cache = KMEM_CACHE(dm_exception, 0);
  1457. if (!exception_cache) {
  1458. DMERR("Couldn't create exception cache.");
  1459. r = -ENOMEM;
  1460. goto bad_exception_cache;
  1461. }
  1462. pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
  1463. if (!pending_cache) {
  1464. DMERR("Couldn't create pending cache.");
  1465. r = -ENOMEM;
  1466. goto bad_pending_cache;
  1467. }
  1468. tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
  1469. if (!tracked_chunk_cache) {
  1470. DMERR("Couldn't create cache to track chunks in use.");
  1471. r = -ENOMEM;
  1472. goto bad_tracked_chunk_cache;
  1473. }
  1474. ksnapd = create_singlethread_workqueue("ksnapd");
  1475. if (!ksnapd) {
  1476. DMERR("Failed to create ksnapd workqueue.");
  1477. r = -ENOMEM;
  1478. goto bad_pending_pool;
  1479. }
  1480. return 0;
  1481. bad_pending_pool:
  1482. kmem_cache_destroy(tracked_chunk_cache);
  1483. bad_tracked_chunk_cache:
  1484. kmem_cache_destroy(pending_cache);
  1485. bad_pending_cache:
  1486. kmem_cache_destroy(exception_cache);
  1487. bad_exception_cache:
  1488. exit_origin_hash();
  1489. bad_origin_hash:
  1490. dm_unregister_target(&merge_target);
  1491. bad_register_merge_target:
  1492. dm_unregister_target(&origin_target);
  1493. bad_register_origin_target:
  1494. dm_unregister_target(&snapshot_target);
  1495. bad_register_snapshot_target:
  1496. dm_exception_store_exit();
  1497. return r;
  1498. }
  1499. static void __exit dm_snapshot_exit(void)
  1500. {
  1501. destroy_workqueue(ksnapd);
  1502. dm_unregister_target(&snapshot_target);
  1503. dm_unregister_target(&origin_target);
  1504. dm_unregister_target(&merge_target);
  1505. exit_origin_hash();
  1506. kmem_cache_destroy(pending_cache);
  1507. kmem_cache_destroy(exception_cache);
  1508. kmem_cache_destroy(tracked_chunk_cache);
  1509. dm_exception_store_exit();
  1510. }
  1511. /* Module hooks */
  1512. module_init(dm_snapshot_init);
  1513. module_exit(dm_snapshot_exit);
  1514. MODULE_DESCRIPTION(DM_NAME " snapshot target");
  1515. MODULE_AUTHOR("Joe Thornber");
  1516. MODULE_LICENSE("GPL");