@@ -186,7 +186,6 @@ struct pool {
 
 	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
-	mempool_t *endio_hook_pool;
 
 	process_bio_fn process_bio;
 	process_bio_fn process_discard;
@@ -304,7 +303,7 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
@@ -368,6 +367,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 		dm_thin_changed_this_transaction(tc->td);
 }
 
+static void inc_all_io_entry(struct pool *pool, struct bio *bio)
+{
+	struct dm_thin_endio_hook *h;
+
+	if (bio->bi_rw & REQ_DISCARD)
+		return;
+
+	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
+}
+
 static void issue(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
@@ -474,7 +484,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
@@ -499,8 +509,7 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
-		       dm_block_t data_block)
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct pool *pool = tc->pool;
 	unsigned long flags;
@@ -513,17 +522,13 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 }
 
 /*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
  */
-static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
-	struct bio_list bios;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
-	bio_list_init(&bios);
-
 	spin_lock_irqsave(&pool->lock, flags);
 	dm_cell_release_no_holder(cell, &pool->deferred_bios);
 	spin_unlock_irqrestore(&pool->lock, flags);
@@ -561,7 +566,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
 	if (r) {
-		DMERR("dm_thin_insert_block() failed");
+		DMERR_LIMIT("dm_thin_insert_block() failed");
 		dm_cell_error(m->cell);
 		goto out;
 	}
@@ -573,10 +578,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 * the bios in the cell.
 	 */
 	if (bio) {
-		cell_defer_except(tc, m->cell);
+		cell_defer_no_holder(tc, m->cell);
 		bio_endio(bio, 0);
 	} else
-		cell_defer(tc, m->cell, m->data_block);
+		cell_defer(tc, m->cell);
 
 out:
 	list_del(&m->list);
@@ -588,8 +593,8 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
 	struct thin_c *tc = m->tc;
 
 	bio_io_error(m->bio);
-	cell_defer_except(tc, m->cell);
-	cell_defer_except(tc, m->cell2);
+	cell_defer_no_holder(tc, m->cell);
+	cell_defer_no_holder(tc, m->cell2);
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
@@ -597,13 +602,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
 
+	inc_all_io_entry(tc->pool, m->bio);
+	cell_defer_no_holder(tc, m->cell);
+	cell_defer_no_holder(tc, m->cell2);
+
 	if (m->pass_discard)
 		remap_and_issue(tc, m->bio, m->data_block);
 	else
 		bio_endio(m->bio, 0);
 
-	cell_defer_except(tc, m->cell);
-	cell_defer_except(tc, m->cell2);
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
@@ -614,7 +621,7 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m)
 
 	r = dm_thin_remove_block(tc->td, m->virt_block);
 	if (r)
-		DMERR("dm_thin_remove_block() failed");
+		DMERR_LIMIT("dm_thin_remove_block() failed");
 
 	process_prepared_discard_passdown(m);
 }
@@ -706,11 +713,12 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+		inc_all_io_entry(pool, bio);
 		remap_and_issue(tc, bio, data_dest);
 	} else {
 		struct dm_io_region from, to;
@@ -727,7 +735,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 				   0, copy_complete, m);
 		if (r < 0) {
 			mempool_free(m, pool->mapping_pool);
-			DMERR("dm_kcopyd_copy() failed");
+			DMERR_LIMIT("dm_kcopyd_copy() failed");
 			dm_cell_error(cell);
 		}
 	}
@@ -775,11 +783,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+		inc_all_io_entry(pool, bio);
 		remap_and_issue(tc, bio, data_block);
 	} else {
 		int r;
@@ -792,7 +801,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
 		if (r < 0) {
 			mempool_free(m, pool->mapping_pool);
-			DMERR("dm_kcopyd_zero() failed");
+			DMERR_LIMIT("dm_kcopyd_zero() failed");
 			dm_cell_error(cell);
 		}
 	}
@@ -804,7 +813,7 @@ static int commit(struct pool *pool)
 
 	r = dm_pool_commit_metadata(pool->pmd);
 	if (r)
-		DMERR("commit failed, error = %d", r);
+		DMERR_LIMIT("commit failed: error = %d", r);
 
 	return r;
 }
@@ -889,7 +898,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
@@ -936,7 +945,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 		 */
 		build_data_key(tc->td, lookup_result.block, &key2);
 		if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-			dm_cell_release_singleton(cell, bio);
+			cell_defer_no_holder(tc, cell);
 			break;
 		}
 
@@ -962,13 +971,15 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 				wake_worker(pool);
 			}
 		} else {
+			inc_all_io_entry(pool, bio);
+			cell_defer_no_holder(tc, cell);
+			cell_defer_no_holder(tc, cell2);
+
 			/*
 			 * The DM core makes sure that the discard doesn't span
 			 * a block boundary. So we submit the discard of a
 			 * partial block appropriately.
 			 */
-			dm_cell_release_singleton(cell, bio);
-			dm_cell_release_singleton(cell2, bio);
 			if ((!lookup_result.shared) && pool->pf.discard_passdown)
 				remap_and_issue(tc, bio, lookup_result.block);
 			else
@@ -980,13 +991,14 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 		/*
 		 * It isn't provisioned, just forget it.
 		 */
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_no_holder(tc, cell);
 		bio_endio(bio, 0);
 		break;
 
 	default:
-		DMERR("discard: find block unexpectedly returned %d", r);
-		dm_cell_release_singleton(cell, bio);
+		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+			    __func__, r);
+		cell_defer_no_holder(tc, cell);
 		bio_io_error(bio);
 		break;
 	}
@@ -1012,7 +1024,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 		break;
 
 	default:
-		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+			    __func__, r);
 		dm_cell_error(cell);
 		break;
 	}
@@ -1037,11 +1050,12 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	if (bio_data_dir(bio) == WRITE && bio->bi_size)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
+		inc_all_io_entry(pool, bio);
+		cell_defer_no_holder(tc, cell);
 
-		dm_cell_release_singleton(cell, bio);
 		remap_and_issue(tc, bio, lookup_result->block);
 	}
 }
@@ -1056,7 +1070,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
 	if (!bio->bi_size) {
-		dm_cell_release_singleton(cell, bio);
+		inc_all_io_entry(tc->pool, bio);
+		cell_defer_no_holder(tc, cell);
+
 		remap_and_issue(tc, bio, 0);
 		return;
 	}
@@ -1066,7 +1082,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 */
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_no_holder(tc, cell);
 		bio_endio(bio, 0);
 		return;
 	}
@@ -1085,7 +1101,8 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 		break;
 
 	default:
-		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+			    __func__, r);
 		set_pool_mode(tc->pool, PM_READ_ONLY);
 		dm_cell_error(cell);
 		break;
@@ -1111,34 +1128,31 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
-		/*
-		 * We can release this cell now. This thread is the only
-		 * one that puts bios into a cell, and we know there were
-		 * no preceding bios.
-		 */
-		/*
-		 * TODO: this will probably have to change when discard goes
-		 * back in.
-		 */
-		dm_cell_release_singleton(cell, bio);
-
-		if (lookup_result.shared)
+		if (lookup_result.shared) {
 			process_shared_bio(tc, bio, block, &lookup_result);
-		else
+			cell_defer_no_holder(tc, cell);
+		} else {
+			inc_all_io_entry(tc->pool, bio);
+			cell_defer_no_holder(tc, cell);
+
 			remap_and_issue(tc, bio, lookup_result.block);
+		}
 		break;
 
 	case -ENODATA:
 		if (bio_data_dir(bio) == READ && tc->origin_dev) {
-			dm_cell_release_singleton(cell, bio);
+			inc_all_io_entry(tc->pool, bio);
+			cell_defer_no_holder(tc, cell);
+
 			remap_to_origin_and_issue(tc, bio);
 		} else
 			provision_block(tc, bio, block, cell);
 		break;
 
 	default:
-		DMERR("dm_thin_find_block() failed, error = %d", r);
-		dm_cell_release_singleton(cell, bio);
+		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+			    __func__, r);
+		cell_defer_no_holder(tc, cell);
 		bio_io_error(bio);
 		break;
 	}
@@ -1156,8 +1170,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	case 0:
 		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
 			bio_io_error(bio);
-		else
+		else {
+			inc_all_io_entry(tc->pool, bio);
 			remap_and_issue(tc, bio, lookup_result.block);
+		}
 		break;
 
 	case -ENODATA:
@@ -1167,6 +1183,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 		}
 
 		if (tc->origin_dev) {
+			inc_all_io_entry(tc->pool, bio);
 			remap_to_origin_and_issue(tc, bio);
 			break;
 		}
@@ -1176,7 +1193,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 		break;
 
 	default:
-		DMERR("dm_thin_find_block() failed, error = %d", r);
+		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+			    __func__, r);
 		bio_io_error(bio);
 		break;
 	}
@@ -1207,7 +1225,7 @@ static void process_deferred_bios(struct pool *pool)
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 		struct thin_c *tc = h->tc;
 
 		/*
@@ -1340,32 +1358,30 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 	wake_worker(pool);
 }
 
-static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
-	struct pool *pool = tc->pool;
-	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
-	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
+	h->all_io_entry = NULL;
 	h->overwrite_mapping = NULL;
-
-	return h;
 }
 
 /*
  * Non-blocking function called from the thin target's map function.
  */
-static int thin_bio_map(struct dm_target *ti, struct bio *bio,
-			union map_info *map_context)
+static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 {
 	int r;
 	struct thin_c *tc = ti->private;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_device *td = tc->td;
 	struct dm_thin_lookup_result result;
+	struct dm_bio_prison_cell *cell1, *cell2;
+	struct dm_cell_key key;
 
-	map_context->ptr = thin_hook_bio(tc, bio);
+	thin_hook_bio(tc, bio);
 
 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		bio_io_error(bio);
@@ -1400,12 +1416,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 		 * shared flag will be set in their case.
 		 */
 		thin_defer_bio(tc, bio);
-			r = DM_MAPIO_SUBMITTED;
-		} else {
-			remap(tc, bio, result.block);
-			r = DM_MAPIO_REMAPPED;
+			return DM_MAPIO_SUBMITTED;
 		}
-		break;
+
+		build_virtual_key(tc->td, block, &key);
+		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+			return DM_MAPIO_SUBMITTED;
+
+		build_data_key(tc->td, result.block, &key);
+		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
+			cell_defer_no_holder(tc, cell1);
+			return DM_MAPIO_SUBMITTED;
+		}
+
+		inc_all_io_entry(tc->pool, bio);
+		cell_defer_no_holder(tc, cell2);
+		cell_defer_no_holder(tc, cell1);
+
+		remap(tc, bio, result.block);
+		return DM_MAPIO_REMAPPED;
 
 	case -ENODATA:
 		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
@@ -1414,8 +1443,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 			 * of doing so. Just error it.
 			 */
 			bio_io_error(bio);
-			r = DM_MAPIO_SUBMITTED;
-			break;
+			return DM_MAPIO_SUBMITTED;
 		}
 		/* fall through */
 
@@ -1425,8 +1453,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 		 * provide the hint to load the metadata into cache.
 		 */
 		thin_defer_bio(tc, bio);
-		r = DM_MAPIO_SUBMITTED;
-		break;
+		return DM_MAPIO_SUBMITTED;
 
 	default:
 		/*
@@ -1435,11 +1462,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 		 * pool is switched to fail-io mode.
 		 */
 		bio_io_error(bio);
-		r = DM_MAPIO_SUBMITTED;
-		break;
+		return DM_MAPIO_SUBMITTED;
 	}
-
-	return r;
 }
 
 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
@@ -1566,14 +1590,12 @@ static void __pool_destroy(struct pool *pool)
 	if (pool->next_mapping)
 		mempool_free(pool->next_mapping, pool->mapping_pool);
 	mempool_destroy(pool->mapping_pool);
-	mempool_destroy(pool->endio_hook_pool);
 	dm_deferred_set_destroy(pool->shared_read_ds);
 	dm_deferred_set_destroy(pool->all_io_ds);
 	kfree(pool);
 }
 
 static struct kmem_cache *_new_mapping_cache;
-static struct kmem_cache *_endio_hook_cache;
 
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
@@ -1667,13 +1689,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 		goto bad_mapping_pool;
 	}
 
-	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
-							 _endio_hook_cache);
-	if (!pool->endio_hook_pool) {
-		*error = "Error creating pool's endio_hook mempool";
-		err_p = ERR_PTR(-ENOMEM);
-		goto bad_endio_hook_pool;
-	}
 	pool->ref_count = 1;
 	pool->last_commit_jiffies = jiffies;
 	pool->pool_md = pool_md;
@@ -1682,8 +1697,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	return pool;
 
-bad_endio_hook_pool:
-	mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
 	dm_deferred_set_destroy(pool->all_io_ds);
 bad_all_io_ds:
@@ -1966,8 +1979,7 @@ out_unlock:
 	return r;
 }
 
-static int pool_map(struct dm_target *ti, struct bio *bio,
-		    union map_info *map_context)
+static int pool_map(struct dm_target *ti, struct bio *bio)
 {
 	int r;
 	struct pool_c *pt = ti->private;
@@ -2358,7 +2370,9 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 	else
 		DMEMIT("rw ");
 
-	if (pool->pf.discard_enabled && pool->pf.discard_passdown)
+	if (!pool->pf.discard_enabled)
+		DMEMIT("ignore_discard");
+	else if (pool->pf.discard_passdown)
 		DMEMIT("discard_passdown");
 	else
 		DMEMIT("no_discard_passdown");
@@ -2454,7 +2468,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2576,6 +2590,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	ti->num_flush_requests = 1;
 	ti->flush_supported = true;
+	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
 	/* In case the pool supports discards, pass them on. */
 	if (tc->pool->pf.discard_enabled) {
@@ -2609,20 +2624,17 @@ out_unlock:
 	return r;
 }
 
-static int thin_map(struct dm_target *ti, struct bio *bio,
-		    union map_info *map_context)
+static int thin_map(struct dm_target *ti, struct bio *bio)
 {
 	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
 
-	return thin_bio_map(ti, bio, map_context);
+	return thin_bio_map(ti, bio);
 }
 
-static int thin_endio(struct dm_target *ti,
-		      struct bio *bio, int err,
-		      union map_info *map_context)
+static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct dm_thin_endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct list_head work;
 	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;
@@ -2643,14 +2655,15 @@ static int thin_endio(struct dm_target *ti,
 	if (h->all_io_entry) {
 		INIT_LIST_HEAD(&work);
 		dm_deferred_entry_dec(h->all_io_entry, &work);
-		spin_lock_irqsave(&pool->lock, flags);
-		list_for_each_entry_safe(m, tmp, &work, list)
-			list_add(&m->list, &pool->prepared_discards);
-		spin_unlock_irqrestore(&pool->lock, flags);
+		if (!list_empty(&work)) {
+			spin_lock_irqsave(&pool->lock, flags);
+			list_for_each_entry_safe(m, tmp, &work, list)
+				list_add(&m->list, &pool->prepared_discards);
+			spin_unlock_irqrestore(&pool->lock, flags);
+			wake_worker(pool);
+		}
 	}
 
-	mempool_free(h, pool->endio_hook_pool);
-
 	return 0;
 }
 
@@ -2745,7 +2758,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
@@ -2779,14 +2792,8 @@ static int __init dm_thin_init(void)
 	if (!_new_mapping_cache)
 		goto bad_new_mapping_cache;
 
-	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
-	if (!_endio_hook_cache)
-		goto bad_endio_hook_cache;
-
 	return 0;
 
-bad_endio_hook_cache:
-	kmem_cache_destroy(_new_mapping_cache);
 bad_new_mapping_cache:
 	dm_unregister_target(&pool_target);
 bad_pool_target:
@@ -2801,7 +2808,6 @@ static void dm_thin_exit(void)
 	dm_unregister_target(&pool_target);
 
 	kmem_cache_destroy(_new_mapping_cache);
-	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);