@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
 	return BLKPREP_OK;
 }
 
-static void map_request(struct dm_target *ti, struct request *clone,
-			struct mapped_device *md)
+/*
+ * Returns:
+ * 0  : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
+		       struct mapped_device *md)
 {
-	int r;
+	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	/*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
 		dm_requeue_unmapped_request(clone);
+		requeued = 1;
 		break;
 	default:
 		if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
 		dm_kill_unmapped_request(clone, r);
 		break;
 	}
+
+	return requeued;
 }
 
 /*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
 		atomic_inc(&md->pending[rq_data_dir(clone)]);
 
 		spin_unlock(q->queue_lock);
-		map_request(ti, clone, md);
+		if (map_request(ti, clone, md))
+			goto requeued;
+
 		spin_lock_irq(q->queue_lock);
 	}
 
 	goto out;
 
+requeued:
+	spin_lock_irq(q->queue_lock);
+
 plug_and_out:
 	if (!elv_queue_empty(q))
 		/* Some requests still remain, retry later */