@@ -126,7 +126,7 @@ struct mapped_device {
 
 	struct request_queue *queue;
 	unsigned type;
-	/* Protect type against concurrent access. */
+	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
 
 	struct gendisk *disk;
@@ -1856,6 +1856,28 @@ static const struct block_device_operations dm_blk_dops;
 static void dm_wq_work(struct work_struct *work);
 static void dm_rq_barrier_work(struct work_struct *work);
 
+static void dm_init_md_queue(struct mapped_device *md)
+{
+	/*
+	 * Request-based dm devices cannot be stacked on top of bio-based dm
+	 * devices. The type of this dm device has not been decided yet.
+	 * The type is decided at the first table loading time.
+	 * To prevent problematic device stacking, clear the queue flag
+	 * for request stacking support until then.
+	 *
+	 * This queue is new, so no concurrency on the queue_flags.
+	 */
+	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info.congested_data = md;
+	blk_queue_make_request(md->queue, dm_request);
+	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+	md->queue->unplug_fn = dm_unplug_all;
+	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
 /*
  * Allocate and initialise a blank device with a given minor.
  */
@@ -1895,33 +1917,11 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->uevent_list);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_init_queue(dm_request_fn, NULL);
+	md->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!md->queue)
 		goto bad_queue;
 
-	/*
-	 * Request-based dm devices cannot be stacked on top of bio-based dm
-	 * devices. The type of this dm device has not been decided yet,
-	 * although we initialized the queue using blk_init_queue().
-	 * The type is decided at the first table loading time.
-	 * To prevent problematic device stacking, clear the queue flag
-	 * for request stacking support until then.
-	 *
-	 * This queue is new, so no concurrency on the queue_flags.
-	 */
-	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-	md->saved_make_request_fn = md->queue->make_request_fn;
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
-	md->queue->backing_dev_info.congested_data = md;
-	blk_queue_make_request(md->queue, dm_request);
-	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
-	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-	blk_queue_softirq_done(md->queue, dm_softirq_done);
-	blk_queue_prep_rq(md->queue, dm_prep_fn);
-	blk_queue_lld_busy(md->queue, dm_lld_busy);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+	dm_init_md_queue(md);
 
 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -2160,6 +2160,48 @@ unsigned dm_get_md_type(struct mapped_device *md)
 	return md->type;
 }
 
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+	struct request_queue *q = NULL;
+
+	if (md->queue->elevator)
+		return 1;
+
+	/* Fully initialize the queue */
+	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+	if (!q)
+		return 0;
+
+	md->queue = q;
+	md->saved_make_request_fn = md->queue->make_request_fn;
+	dm_init_md_queue(md);
+	blk_queue_softirq_done(md->queue, dm_softirq_done);
+	blk_queue_prep_rq(md->queue, dm_prep_fn);
+	blk_queue_lld_busy(md->queue, dm_lld_busy);
+	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+	elv_register_queue(md->queue);
+
+	return 1;
+}
+
+/*
+ * Setup the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+	    !dm_init_request_based_queue(md)) {
+		DMWARN("Cannot initialize queue for request-based mapped device");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct mapped_device *dm_find_md(dev_t dev)
 {
 	struct mapped_device *md;
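
For context, a minimal sketch of how a table-load path might drive the new hook (illustrative only, not part of this patch): dm_set_md_type(), dm_table_get_type() and DM_TYPE_NONE are assumed helpers/constants here, and the direct mutex_lock() on md->type_lock merely illustrates the updated "Protect queue and type" comment from the first hunk.

/* Hypothetical caller: decide the type on first load, then set up the queue. */
static int example_bind_table(struct mapped_device *md, struct dm_table *t)
{
	int r;

	mutex_lock(&md->type_lock);	/* protects md->type and md->queue */

	if (dm_get_md_type(md) == DM_TYPE_NONE)
		/* the first table load decides bio-based vs request-based */
		dm_set_md_type(md, dm_table_get_type(t));

	/* only request-based devices get the full queue (elevator etc.) now */
	r = dm_setup_md_queue(md);

	mutex_unlock(&md->type_lock);
	return r;
}

With this split, a bio-based device keeps the lightweight queue from blk_alloc_queue() and never allocates an elevator; dm_init_request_based_queue() upgrades the queue only when the first loaded table turns out to be request-based, and returns early if an elevator is already present.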