@@ -282,6 +282,23 @@ failed:
 	m->current_pg = NULL;
 }
 
+/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * m->lock must be held on entry.
+ *
+ * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
+ * same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static int __must_push_back(struct multipath *m)
+{
+	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
+		dm_noflush_suspending(m->ti));
+}
+
 static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		  unsigned was_queued)
 {
@@ -311,10 +328,12 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = DM_MAPIO_SUBMITTED;
-	} else if (!pgpath)
-		r = -EIO;	/* Failed */
-	else
+	} else if (pgpath)
 		bio->bi_bdev = pgpath->path.dev->bdev;
+	else if (__must_push_back(m))
+		r = DM_MAPIO_REQUEUE;
+	else
+		r = -EIO;	/* Failed */
 
 	mpio->pgpath = pgpath;
 
@@ -374,6 +393,8 @@ static void dispatch_queued_ios(struct multipath *m)
 			bio_endio(bio, bio->bi_size, r);
 		else if (r == DM_MAPIO_REMAPPED)
 			generic_make_request(bio);
+		else if (r == DM_MAPIO_REQUEUE)
+			bio_endio(bio, bio->bi_size, -EIO);
 
 		bio = next;
 	}
@@ -783,7 +804,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 	map_context->ptr = mpio;
 	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
 	r = map_io(m, bio, mpio, 0);
-	if (r < 0)
+	if (r < 0 || r == DM_MAPIO_REQUEUE)
 		mempool_free(mpio, m->mpio_pool);
 
 	return r;
@@ -1007,7 +1028,10 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 
 	spin_lock_irqsave(&m->lock, flags);
 	if (!m->nr_valid_paths) {
-		if (!m->queue_if_no_path) {
+		if (__must_push_back(m)) {
+			spin_unlock_irqrestore(&m->lock, flags);
+			return DM_ENDIO_REQUEUE;
+		} else if (!m->queue_if_no_path) {
 			spin_unlock_irqrestore(&m->lock, flags);
 			return -EIO;
 		} else {
|