@@ -59,6 +59,9 @@ struct dm_snapshot {
 	struct rw_semaphore lock;
 
 	struct dm_dev *origin;
+	struct dm_dev *cow;
+
+	struct dm_target *ti;
 
 	/* List of snapshots per Origin */
 	struct list_head list;
@@ -97,6 +100,12 @@ struct dm_snapshot {
 	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
 };
 
+struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+{
+	return s->cow;
+}
+EXPORT_SYMBOL(dm_snap_cow);
+
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
@@ -558,7 +567,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 	 * Calculate based on the size of the original volume or
 	 * the COW volume...
 	 */
-	cow_dev_size = get_dev_size(s->store->cow->bdev);
+	cow_dev_size = get_dev_size(s->cow->bdev);
 	origin_dev_size = get_dev_size(s->origin->bdev);
 	max_buckets = calc_max_buckets();
 
@@ -596,45 +605,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	struct dm_snapshot *s;
 	int i;
 	int r = -EINVAL;
-	char *origin_path;
-	struct dm_exception_store *store;
+	char *origin_path, *cow_path;
 	unsigned args_used;
 
 	if (argc != 4) {
 		ti->error = "requires exactly 4 arguments";
 		r = -EINVAL;
-		goto bad_args;
+		goto bad;
 	}
 
 	origin_path = argv[0];
 	argv++;
 	argc--;
 
-	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s) {
+		ti->error = "Cannot allocate snapshot context private "
+		    "structure";
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	cow_path = argv[0];
+	argv++;
+	argc--;
+
+	r = dm_get_device(ti, cow_path, 0, 0,
+			  FMODE_READ | FMODE_WRITE, &s->cow);
+	if (r) {
+		ti->error = "Cannot get COW device";
+		goto bad_cow;
+	}
+
+	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
 	if (r) {
 		ti->error = "Couldn't create exception store";
 		r = -EINVAL;
-		goto bad_args;
+		goto bad_store;
 	}
 
 	argv += args_used;
 	argc -= args_used;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (!s) {
-		ti->error = "Cannot allocate snapshot context private "
-		    "structure";
-		r = -ENOMEM;
-		goto bad_snap;
-	}
-
 	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
 	if (r) {
 		ti->error = "Cannot get origin device";
 		goto bad_origin;
 	}
 
-	s->store = store;
+	s->ti = ti;
 	s->valid = 1;
 	s->active = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
@@ -723,12 +742,15 @@ bad_hash_tables:
 	dm_put_device(ti, s->origin);
 
 bad_origin:
-	kfree(s);
+	dm_exception_store_destroy(s->store);
 
-bad_snap:
-	dm_exception_store_destroy(store);
+bad_store:
+	dm_put_device(ti, s->cow);
 
-bad_args:
+bad_cow:
+	kfree(s);
+
+bad:
 	return r;
 }
 
@@ -777,6 +799,8 @@ static void snapshot_dtr(struct dm_target *ti)
 
 	dm_exception_store_destroy(s->store);
 
+	dm_put_device(ti, s->cow);
+
 	kfree(s);
 }
 
@@ -839,7 +863,7 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 
 	s->valid = 0;
 
-	dm_table_event(s->store->ti->table);
+	dm_table_event(s->ti->table);
 }
 
 static void get_pending_exception(struct dm_snap_pending_exception *pe)
@@ -977,7 +1001,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
 	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
-	dest.bdev = s->store->cow->bdev;
+	dest.bdev = s->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
 	dest.count = src.count;
 
@@ -1038,7 +1062,7 @@ __find_pending_exception(struct dm_snapshot *s,
 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
 			    struct bio *bio, chunk_t chunk)
 {
-	bio->bi_bdev = s->store->cow->bdev;
+	bio->bi_bdev = s->cow->bdev;
 	bio->bi_sector = chunk_to_sector(s->store,
 					 dm_chunk_number(e->new_chunk) +
 					 (chunk - e->old_chunk)) +
@@ -1056,7 +1080,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	struct dm_snap_pending_exception *pe = NULL;
 
 	if (unlikely(bio_empty_barrier(bio))) {
-		bio->bi_bdev = s->store->cow->bdev;
+		bio->bi_bdev = s->cow->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
 
@@ -1200,7 +1224,7 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 		 * to make private copies if the output is to
 		 * make sense.
 		 */
-		DMEMIT("%s", snap->origin->name);
+		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
 		snap->store->type->status(snap->store, type, result + sz,
 					  maxlen - sz);
 		break;
@@ -1240,7 +1264,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 			goto next_snapshot;
 
 		/* Nothing to do if writing beyond end of snapshot */
-		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
+		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
 			goto next_snapshot;
 
 		/*
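
For context, a minimal sketch (not part of the patch) of how an exception store implementation might reach the COW device now that the dm_dev reference lives in the snapshot core: it goes through the newly exported dm_snap_cow() accessor. The sketch assumes the store keeps a back-pointer to its owning snapshot, which is what the extra 's' argument now passed to dm_exception_store_create() suggests; the field name "snap" and the helper name are hypothetical.

/*
 * Illustrative sketch only -- not part of this patch.  Assumes
 * struct dm_exception_store carries a back-pointer ("snap") to its
 * owning struct dm_snapshot, set up from the extra argument to
 * dm_exception_store_create().
 */
static sector_t example_cow_size(struct dm_exception_store *store)
{
	struct block_device *bdev = dm_snap_cow(store->snap)->bdev;

	/* Same calculation get_dev_size() does: device size in sectors. */
	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}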