@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
 	int r = error;
 	struct dm_rq_target_io *tio = clone->end_io_data;
-	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+	dm_request_endio_fn rq_end_io = NULL;
 
-	if (mapped && rq_end_io)
-		r = rq_end_io(tio->ti, clone, error, &tio->info);
+	if (tio->ti) {
+		rq_end_io = tio->ti->type->rq_end_io;
+
+		if (mapped && rq_end_io)
+			r = rq_end_io(tio->ti, clone, error, &tio->info);
+	}
 
 	if (r <= 0)
 		/* The target wants to complete the I/O */
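With the later hunks, a request that is killed before being mapped to a target reaches dm_done() with tio->ti still NULL, so the old unconditional tio->ti->type->rq_end_io dereference would oops. The hunk defers the callback lookup until the target is known to exist. A minimal user-space sketch of the same guarded-callback pattern (the types and names below are simplified stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved; none of
 * these are the real definitions. */
struct target;
typedef int (*endio_fn)(struct target *ti, int error);

struct target_type { endio_fn rq_end_io; };
struct target { struct target_type *type; };
struct target_io { struct target *ti; };

/* Mirrors the patched dm_done() flow: the end-io callback is only
 * looked up once we know a target was actually assigned. */
static int done(struct target_io *tio, int error, int mapped)
{
	int r = error;
	endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, error);
	}

	return r;
}

static int my_end_io(struct target *ti, int error)
{
	(void)ti;
	return error;	/* a real target could retry or remap here */
}

int main(void)
{
	struct target_type tt = { my_end_io };
	struct target t = { &tt };
	struct target_io mapped_io = { &t };
	struct target_io killed_io = { NULL };	/* killed before mapping */

	printf("%d\n", done(&mapped_io, 0, 1));		/* callback runs */
	printf("%d\n", done(&killed_io, -5, 1));	/* no NULL dereference */
	return 0;
}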
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	/*
-	 * Hold the md reference here for the in-flight I/O.
-	 * We can't rely on the reference count by device opener,
-	 * because the device may be closed during the request completion
-	 * when all bios are completed.
-	 * See the comment in rq_completed() too.
-	 */
-	dm_get(md);
-
 	tio->ti = ti;
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+	struct request *clone;
+
+	blk_start_request(orig);
+	clone = orig->special;
+	atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+	/*
+	 * Hold the md reference here for the in-flight I/O.
+	 * We can't rely on the reference count by device opener,
+	 * because the device may be closed during the request completion
+	 * when all bios are completed.
+	 * See the comment in rq_completed() too.
+	 */
+	dm_get(md);
+
+	return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
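These two hunks are a move: the per-request dm_get(md) (and its comment) leave map_request() for the new dm_start_request() helper, which also absorbs blk_start_request(), the clone lookup, and the pending-I/O accounting. dm_request_fn() thereby gets a single call that performs everything a dequeued request needs, so the new beyond-end error path below can reuse it. A user-space sketch of the idea, with toy types standing in for struct mapped_device and struct request:

#include <stdio.h>

/* Toy stand-ins for the mapped device and a block request; the point
 * is the shape of the refactoring, not the block layer itself. */
struct device {
	int refcount;	/* analogue of md's reference count */
	int pending;	/* analogue of md->pending[] */
};

struct request {
	struct device *dev;
	int sector;
};

/* Analogue of dm_start_request(): every dequeued request gets the
 * same bookkeeping -- accounting plus a reference that pins the
 * device for the lifetime of the in-flight I/O, independent of any
 * opener's reference. */
static struct request *start_request(struct device *dev, struct request *rq)
{
	dev->pending++;
	dev->refcount++;
	return rq;
}

/* Analogue of rq_completed(): drop the accounting and the pin. */
static void complete_request(struct request *rq)
{
	rq->dev->pending--;
	rq->dev->refcount--;
}

int main(void)
{
	struct device dev = { 1, 0 };		/* one opener reference */
	struct request rq = { &dev, 42 };

	start_request(&dev, &rq);
	printf("pending=%d refcount=%d\n", dev.pending, dev.refcount);

	dev.refcount--;		/* opener closes the device mid-flight */
	complete_request(&rq);	/* the I/O's own pin kept it valid */
	printf("pending=%d refcount=%d\n", dev.pending, dev.refcount);
	return 0;
}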
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
 		pos = blk_rq_pos(rq);
 
 		ti = dm_table_find_target(map, pos);
-		BUG_ON(!dm_target_is_valid(ti));
+		if (!dm_target_is_valid(ti)) {
+			/*
+			 * Must perform setup, that dm_done() requires,
+			 * before calling dm_kill_unmapped_request
+			 */
+			DMERR_LIMIT("request attempted access beyond the end of device");
+			clone = dm_start_request(md, rq);
+			dm_kill_unmapped_request(clone, -EIO);
+			continue;
+		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		blk_start_request(rq);
-		clone = rq->special;
-		atomic_inc(&md->pending[rq_data_dir(clone)]);
+		clone = dm_start_request(md, rq);
 
 		spin_unlock(q->queue_lock);
 		if (map_request(ti, clone, md))
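This hunk replaces the BUG_ON() with a recoverable failure: a request beyond the end of the device now logs a rate-limited error via DMERR_LIMIT and is completed with -EIO instead of crashing the machine. Note the ordering: dm_start_request() must run first so that dm_done() and rq_completed() see the same state as on the normal path (which is also why dm_done() gained the tio->ti NULL check in the first hunk). A compact sketch of that "do the mandatory setup even when failing" discipline, with toy types and an ordinary fprintf standing in for DMERR_LIMIT:

#include <stdio.h>

enum { DEV_SECTORS = 100 };

struct request { int sector; int started; int result; };

/* Same mandatory setup for every dequeued request, mirroring
 * dm_start_request() above: the completion path assumes it ran. */
static void start_request(struct request *rq)
{
	rq->started = 1;
}

/* Analogue of dm_kill_unmapped_request(): complete with an error.
 * Completion-side accounting relies on start_request() having run. */
static void kill_request(struct request *rq, int error)
{
	rq->result = error;
}

static void dispatch(struct request *rq)
{
	if (rq->sector >= DEV_SECTORS) {
		/* Graceful replacement for the old BUG_ON(): log, do
		 * the mandatory setup, then fail the I/O. */
		fprintf(stderr, "request beyond end of device\n");
		start_request(rq);
		kill_request(rq, -5 /* -EIO */);
		return;
	}
	start_request(rq);
	rq->result = 0;		/* the normal mapping path would run here */
}

int main(void)
{
	struct request good = { 10, 0, 0 };
	struct request bad = { 200, 0, 0 };

	dispatch(&good);
	dispatch(&bad);
	printf("good=%d bad=%d\n", good.result, bad.result);	/* 0, -5 */
	return 0;
}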
@@ -1684,8 +1706,6 @@ delay_and_out:
 	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
-
-	return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)