/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE 16
#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
                                  (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /* You can't use a snapshot if this is 0 (e.g. if full) */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        /* Whether or not owning mapped_device is suspended */
        int suspended;

        mempool_t *pending_pool;

        atomic_t pending_exceptions_count;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Queue of snapshot writes for ksnapd to flush */
        struct bio_list queued_bios;
        struct work_struct queued_bios_work;

        /* Chunks with outstanding reads */
        mempool_t *tracked_chunk_pool;
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};
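
/*
 * Locking overview, as used throughout this file: s->lock serialises
 * access to the exception tables and the valid/active/suspended flags;
 * pe_lock covers the queued_bios list drained by ksnapd; and
 * tracked_chunk_lock guards the hash of chunks with reads in flight.
 */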

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /*
         * Short-term queue of pending exceptions prior to submission.
         */
        struct list_head list;

        /*
         * The primary pending_exception is the one that holds
         * the ref_count and the list of origin_bios for a
         * group of pending_exceptions. It is always last to get freed.
         * These fields get set up when writing to the origin.
         */
        struct dm_snap_pending_exception *primary_pe;

        /*
         * Number of pending_exceptions processing this chunk.
         * When this drops to zero we must complete the origin bios.
         * If incrementing or decrementing this, hold pe->snap->lock for
         * the sibling concerned and not pe->primary_pe->snap->lock unless
         * they are the same.
         */
        atomic_t ref_count;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}
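
/*
 * Reads that map straight through to a not-yet-copied chunk of the origin
 * are recorded in tracked_chunk_hash for their duration, so that
 * pending_complete() below can wait (via __chunk_is_tracked()) until no
 * such read is still in flight before remapping the chunk to the COW
 * device.
 */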

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct dm_snapshot *l;
        struct origin *o, *new_o;
        struct block_device *bdev = snap->origin->bdev;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);
        o = __lookup_origin(bdev);

        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < snap->store->chunk_size)
                        break;
        list_add_tail(&snap->list, &l->list);

        up_write(&_origins_lock);
        return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
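
/*
 * For example, with hash_shift == DM_CHUNK_CONSECUTIVE_BITS the low bits
 * of the chunk number are dropped before masking, so a run of consecutive
 * chunks lands in the same bucket and can be merged into a single
 * dm_exception by dm_insert_exception() below.
 */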

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;

        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
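/*
 * Sizing sketch (assuming a 64-bit build where sizeof(struct list_head)
 * is 16 bytes): calc_max_buckets() yields 2MB / 16 = 131072 buckets, so
 * the completed-exception table below is the smaller device's size in
 * chunks, clamped to [64, 131072] and rounded down to a power of two.
 */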
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
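/*
 * For example (device names are illustrative), a persistent snapshot
 * with 8-sector (4KiB) chunks might be created with:
 *
 *     echo "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 8" | \
 *         dmsetup create snap
 */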
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate snapshot context private "
                            "structure";
                r = -ENOMEM;
                goto bad;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, 0, 0,
                          FMODE_READ | FMODE_WRITE, &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        s->suspended = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        spin_lock_init(&s->pe_lock);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        /* Metadata must only be loaded into one table at once */
        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_load_and_register;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        bio_list_init(&s->queued_bios);
        INIT_WORK(&s->queued_bios_work, flush_queued_bios);

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_load_and_register;
        }

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        if (register_snapshot(s)) {
                r = -EINVAL;
                ti->error = "Cannot register snapshot origin";
                goto bad_load_and_register;
        }

        ti->private = s;
        ti->split_io = s->store->chunk_size;
        ti->num_flush_requests = 1;

        return 0;

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_put_device(ti, s->origin);

bad_origin:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;

        flush_workqueue(ksnapd);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_put_device(ti, s->origin);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static void flush_queued_bios(struct work_struct *work)
{
        struct dm_snapshot *s =
                container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;

        spin_lock_irqsave(&s->pe_lock, flags);
        queued_bios = bio_list_get(&s->queued_bios);
        spin_unlock_irqrestore(&s->pe_lock, flags);

        flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
        atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snap_pending_exception *primary_pe;
        struct bio *origin_bios = NULL;

        primary_pe = pe->primary_pe;

        /*
         * If this pe is involved in a write to the origin and
         * it is the last sibling to complete then release
         * the bios for the original write to the origin.
         */
        if (primary_pe &&
            atomic_dec_and_test(&primary_pe->ref_count)) {
                origin_bios = bio_list_get(&primary_pe->origin_bios);
                free_pending_exception(primary_pe);
        }

        /*
         * Free the pe if it's not linked to an origin write or if
         * it's not itself a primary pe.
         */
        if (!primary_pe || primary_pe != pe)
                free_pending_exception(pe);

        return origin_bios;
}
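
/*
 * Lifecycle sketch for an origin write hitting several snapshots: each
 * sibling pe takes a reference on the shared primary_pe in
 * __origin_write(); as each copy finishes, put_pending_exception() drops
 * one reference, and whoever drops the last one releases the queued
 * origin bios and frees the primary_pe itself.
 */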

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /*
         * Check for conflicting reads. This is extremely improbable,
         * so msleep(1) is sufficient and there is no need for a wait queue.
         */
        while (__chunk_is_tracked(s, pe->e.old_chunk))
                msleep(1);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = put_pending_exception(pe);

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);
        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client,
                       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->primary_pe = NULL;
        atomic_set(&pe->ref_count, 0);
        pe->started = 0;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        get_pending_exception(pe);
        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}
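
/*
 * snapshot_map() below implements the copy-on-write policy: reads and
 * already-remapped chunks go straight to the origin or the COW device,
 * while a write to an unremapped chunk allocates a pending exception,
 * queues the bio on it and kicks off the kcopyd copy; the bio is only
 * released by pending_complete() once the chunk has been copied out.
 */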

static int snapshot_map(struct dm_target *ti, struct bio *bio,
                        union map_info *map_context)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        if (unlikely(bio_empty_barrier(bio))) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio, chunk);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
        if (bio_rw(bio) == WRITE) {
                pe = __lookup_pending_exception(s, chunk);
                if (!pe) {
                        up_write(&s->lock);
                        pe = alloc_pending_exception(s);
                        down_write(&s->lock);

                        if (!s->valid) {
                                free_pending_exception(pe);
                                r = -EIO;
                                goto out_unlock;
                        }

                        e = dm_lookup_exception(&s->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                remap_exception(s, e, bio, chunk);
                                goto out_unlock;
                        }

                        pe = __find_pending_exception(s, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(s, -ENOMEM);
                                r = -EIO;
                                goto out_unlock;
                        }
                }

                remap_exception(s, &pe->e, bio, chunk);
                bio_list_add(&pe->snapshot_bios, bio);

                r = DM_MAPIO_SUBMITTED;

                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
                        up_write(&s->lock);
                        start_copy(pe);
                        goto out;
                }
        } else {
                bio->bi_bdev = s->origin->bdev;
                map_context->ptr = track_chunk(s, chunk);
        }

out_unlock:
        up_write(&s->lock);
out:
        return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
                           int error, union map_info *map_context)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snap_tracked_chunk *c = map_context->ptr;

        if (c)
                stop_tracking_chunk(s, c);

        return 0;
}

static void snapshot_postsuspend(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        down_write(&s->lock);
        s->suspended = 1;
        up_write(&s->lock);
}

static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        down_write(&s->lock);
        s->active = 1;
        s->suspended = 0;
        up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
{
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:

                down_write(&snap->lock);

                if (!snap->valid)
                        DMEMIT("Invalid");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
                                         metadata_sectors;
                                snap->store->type->usage(snap->store,
                                                         &total_sectors,
                                                         &sectors_allocated,
                                                         &metadata_sectors);
                                DMEMIT("%llu/%llu %llu",
                                       (unsigned long long)sectors_allocated,
                                       (unsigned long long)total_sectors,
                                       (unsigned long long)metadata_sectors);
                        } else
                                DMEMIT("Unknown");
                }

                up_write(&snap->lock);

                break;

        case STATUSTYPE_TABLE:
                /*
                 * kdevname returns a static pointer so we need
                 * to make private copies if the output is to
                 * make sense.
                 */
                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
                snap->store->type->status(snap->store, type, result + sz,
                                          maxlen - sz);
                break;
        }

        return 0;
}

static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
{
        struct dm_snapshot *snap = ti->private;

        return fn(ti, snap->origin, 0, ti->len, data);
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED, first = 0;
        struct dm_snapshot *snap;
        struct dm_exception *e;
        struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
        chunk_t chunk;
        LIST_HEAD(pe_queue);

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap->store, bio->bi_sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 *
                 * ref_count is initialised to 1 so pending_complete()
                 * won't destroy the primary_pe while we're inside this loop.
                 */
                e = dm_lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __lookup_pending_exception(snap, chunk);
                if (!pe) {
                        up_write(&snap->lock);
                        pe = alloc_pending_exception(snap);
                        down_write(&snap->lock);

                        if (!snap->valid) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        e = dm_lookup_exception(&snap->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        pe = __find_pending_exception(snap, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(snap, -ENOMEM);
                                goto next_snapshot;
                        }
                }

                if (!primary_pe) {
                        /*
                         * Either every pe here has same
                         * primary_pe or none has one yet.
                         */
                        if (pe->primary_pe)
                                primary_pe = pe->primary_pe;
                        else {
                                primary_pe = pe;
                                first = 1;
                        }

                        bio_list_add(&primary_pe->origin_bios, bio);

                        r = DM_MAPIO_SUBMITTED;
                }

                if (!pe->primary_pe) {
                        pe->primary_pe = primary_pe;
                        get_pending_exception(primary_pe);
                }

                if (!pe->started) {
                        pe->started = 1;
                        list_add_tail(&pe->list, &pe_queue);
                }

next_snapshot:
                up_write(&snap->lock);
        }

        if (!primary_pe)
                return r;

        /*
         * If this is the first time we're processing this chunk and
         * ref_count is now 1 it means all the pending exceptions
         * got completed while we were in the loop above, so it falls to
         * us here to remove the primary_pe and submit any origin_bios.
         */
        if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
                flush_bios(bio_list_get(&primary_pe->origin_bios));
                free_pending_exception(primary_pe);
                /* If we got here, pe_queue is necessarily empty. */
                return r;
        }

        /*
         * Now that we have a complete pe list we can start the copying.
         */
        list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
                start_copy(pe);

        return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio);
        up_read(&_origins_lock);

        return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
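/*
 * For example (again with illustrative device names), the matching
 * origin target wrapping the base device would be:
 *
 *     echo "0 2097152 snapshot-origin /dev/vg/base" | dmsetup create base
 */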
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], 0, ti->len,
                          dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        ti->num_flush_requests = 1;

        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        if (unlikely(bio_empty_barrier(bio)))
                return DM_MAPIO_REMAPPED;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;

        down_read(&_origins_lock);
        ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
        up_read(&_origins_lock);
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
                         unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }

        return 0;
}

static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct dm_dev *dev = ti->private;

        return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 7, 0},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
        .iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 9, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
        .postsuspend = snapshot_postsuspend,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_exception_store_init();
        if (r) {
                DMERR("Failed to initialize exception stores");
                return r;
        }

        r = dm_register_target(&snapshot_target);
        if (r) {
                DMERR("snapshot target register failed %d", r);
                goto bad_register_snapshot_target;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad1;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad2;
        }

        exception_cache = KMEM_CACHE(dm_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad3;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad4;
        }

        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
        if (!tracked_chunk_cache) {
                DMERR("Couldn't create cache to track chunks in use.");
                r = -ENOMEM;
                goto bad5;
        }

        ksnapd = create_singlethread_workqueue("ksnapd");
        if (!ksnapd) {
                DMERR("Failed to create ksnapd workqueue.");
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        return 0;

bad_pending_pool:
        kmem_cache_destroy(tracked_chunk_cache);
bad5:
        kmem_cache_destroy(pending_cache);
bad4:
        kmem_cache_destroy(exception_cache);
bad3:
        exit_origin_hash();
bad2:
        dm_unregister_target(&origin_target);
bad1:
        dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
        dm_exception_store_exit();
        return r;
}

static void __exit dm_snapshot_exit(void)
{
        destroy_workqueue(ksnapd);

        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);

        exit_origin_hash();
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
        kmem_cache_destroy(tracked_chunk_cache);

        dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");