@@ -211,10 +211,55 @@ struct dm_md_mempools {
 	struct bio_set *bs;
 };
 
-#define MIN_IOS 256
+#define RESERVED_BIO_BASED_IOS		16
+#define RESERVED_REQUEST_BASED_IOS	256
+#define RESERVED_MAX_IOS		1024
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_rq_tio_cache;
 
+/*
+ * Bio-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+
+/*
+ * Request-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+				      unsigned def, unsigned max)
+{
+	unsigned ios = ACCESS_ONCE(*reserved_ios);
+	unsigned modified_ios = 0;
+
+	if (!ios)
+		modified_ios = def;
+	else if (ios > max)
+		modified_ios = max;
+
+	if (modified_ios) {
+		(void)cmpxchg(reserved_ios, ios, modified_ios);
+		ios = modified_ios;
+	}
+
+	return ios;
+}
+
+unsigned dm_get_reserved_bio_based_ios(void)
+{
+	return __dm_get_reserved_ios(&reserved_bio_based_ios,
+				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+	return __dm_get_reserved_ios(&reserved_rq_based_ios,
+				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
 static int __init local_init(void)
 {
 	int r = -ENOMEM;
@@ -2277,6 +2322,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
 	return md->immutable_target_type;
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'.
+ */
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+	BUG_ON(!atomic_read(&md->holders));
+	return &md->queue->limits;
+}
+EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
 /*
  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
  */
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 
 	if (type == DM_TYPE_BIO_BASED) {
 		cachep = _io_cache;
-		pool_size = 16;
+		pool_size = dm_get_reserved_bio_based_ios();
 		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
 	} else if (type == DM_TYPE_REQUEST_BASED) {
 		cachep = _rq_tio_cache;
-		pool_size = MIN_IOS;
+		pool_size = dm_get_reserved_rq_based_ios();
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
 		/* per_bio_data_size is not used. See __bind_mempools(). */
 		WARN_ON(per_bio_data_size != 0);
 	} else
 		goto out;
 
-	pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
 	if (!pools->io_pool)
 		goto out;
 
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
 
 module_param(major, uint, 0);
 MODULE_PARM_DESC(major, "The major number of the device mapper");
+
+module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
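
For context, a minimal sketch of how a caller elsewhere in DM might consume the two new exports. The target struct, helper, and names below are hypothetical and purely illustrative, not part of this patch; it also assumes dm_get_reserved_rq_based_ios() and dm_get_queue_limits() are declared in DM's headers (drivers/md/dm.h and include/linux/device-mapper.h respectively), which this hunk set does not show.

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/device-mapper.h>
#include "dm.h"

/* Hypothetical per-target state, not part of this patch. */
struct foo_target {
	struct mapped_device *md;
	mempool_t *req_pool;
};

static int foo_alloc_pool(struct foo_target *ft, struct kmem_cache *cache)
{
	/*
	 * Size a private pool with the same user-tunable reservation
	 * that dm.c now uses for its request-based io_pool.
	 */
	unsigned nr_ios = dm_get_reserved_rq_based_ios();

	ft->req_pool = mempool_create_slab_pool(nr_ios, cache);
	if (!ft->req_pool)
		return -ENOMEM;

	/*
	 * Per the comment added above, the returned queue_limits are
	 * only valid while a reference on 'md' is held.
	 */
	if (dm_get_queue_limits(ft->md)->max_sectors < 8)
		DMWARN("foo: unusually small max_sectors");

	return 0;
}

Because the parameters are registered with S_IRUGO | S_IWUSR, they should be readable and writable at runtime under /sys/module/dm_mod/parameters/ (assuming DM core is built as the usual dm_mod module) and settable at load or boot time, e.g. dm_mod.reserved_rq_based_ios=512. Note that an out-of-range write is not rejected: a value of 0 or one above RESERVED_MAX_IOS is corrected back to the default or the cap by __dm_get_reserved_ios() the next time the reservation is read.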