@@ -62,18 +62,6 @@ struct dm_io {
 	spinlock_t endio_lock;
 };
 
-/*
- * For bio-based dm.
- * One of these is allocated per target within a bio. Hopefully
- * this will be simplified out one day.
- */
-struct dm_target_io {
-	struct dm_io *io;
-	struct dm_target *ti;
-	union map_info info;
-	struct bio clone;
-};
-
 /*
  * For request-based dm.
  * One of these is allocated per request.
@@ -1980,13 +1968,20 @@ static void free_dev(struct mapped_device *md)
 
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
-	struct dm_md_mempools *p;
+	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
-		/* the md already has necessary mempools */
+	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
+		/*
+		 * The md already has necessary mempools. Reload just the
+		 * bioset because front_pad may have changed because
+		 * a different table was loaded.
+		 */
+		bioset_free(md->bs);
+		md->bs = p->bs;
+		p->bs = NULL;
 		goto out;
+	}
 
-	p = dm_table_get_md_mempools(t);
 	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
 
 	md->io_pool = p->io_pool;
@@ -2745,7 +2740,7 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
 {
 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
 	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2753,6 +2748,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 	if (!pools)
 		return NULL;
 
+	per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+
 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
 		mempool_create_slab_pool(MIN_IOS, _io_cache) :
 		mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2768,7 +2765,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 
 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
 		bioset_create(pool_size,
-			      offsetof(struct dm_target_io, clone)) :
+			      per_bio_data_size + offsetof(struct dm_target_io, clone)) :
 		bioset_create(pool_size,
 			      offsetof(struct dm_rq_clone_bio_info, clone));
 	if (!pools->bs)
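
Note on usage (not part of the patch): the enlarged front_pad reserves per_bio_data_size
bytes in front of the struct dm_target_io that is embedded ahead of each clone bio, giving
this layout inside every bioset allocation:

	[ per-bio data ][ struct dm_target_io ... struct bio clone ]
	                                          ^ bio handed to the target

Below is a minimal sketch of how a bio-based target could consume this, assuming the
per_bio_data_size field of struct dm_target and the dm_per_bio_data() accessor introduced
alongside this dm.c change (not shown in these hunks). The target name, my_ctx, and the
callbacks are hypothetical, and .map uses the three-argument signature of this era:

	#include <linux/device-mapper.h>
	#include <linux/jiffies.h>
	#include <linux/module.h>

	struct my_ctx {				/* hypothetical per-bio state */
		unsigned long start_jiffies;
	};

	static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* Ask dm core to reserve front_pad space in every clone bio. */
		ti->per_bio_data_size = sizeof(struct my_ctx);
		return 0;
	}

	static int my_map(struct dm_target *ti, struct bio *bio,
			  union map_info *map_context)
	{
		/* Locate the reserved area; no separate mempool allocation. */
		struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));

		ctx->start_jiffies = jiffies;
		return DM_MAPIO_REMAPPED;	/* remapping details elided */
	}

	static struct target_type my_target = {
		.name	 = "my_example",
		.version = {1, 0, 0},
		.module	 = THIS_MODULE,
		.ctr	 = my_ctr,
		.map	 = my_map,
	};

	static int __init my_init(void)
	{
		return dm_register_target(&my_target);
	}

	static void __exit my_exit(void)
	{
		dm_unregister_target(&my_target);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");

The roundup() to __alignof__(struct dm_target_io) in dm_alloc_md_mempools() keeps the
dm_target_io that follows the per-bio area naturally aligned no matter what size a
target requests.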