@@ -69,6 +69,7 @@ struct multipath {
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
 	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -95,8 +96,6 @@ struct multipath {
 	mempool_t *mpio_pool;
 
 	struct mutex work_mutex;
-
-	unsigned suspended;	/* Don't create new I/O internally when set. */
 };
 
 /*
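Review note: the private suspended flag is dropped here; the hunks below remove its users and rely instead on the dm core's own suspend state plus the new pg_init wait. A minimal sketch of the check that remains, assuming the dm_suspended(ti) interface that this same patch uses later in multipath_message():

	/* Sketch only, not part of the patch: message handling now
	 * gates on core-maintained suspend state. */
	if (dm_suspended(ti))
		return -EBUSY;	/* target suspended; refuse new work */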
@@ -202,6 +201,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
+		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
@@ -235,6 +235,21 @@ static void free_multipath(struct multipath *m)
  * Path selection
  *-----------------------------------------------*/
 
+static void __pg_init_all_paths(struct multipath *m)
+{
+	struct pgpath *pgpath;
+
+	m->pg_init_count++;
+	m->pg_init_required = 0;
+	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+		/* Skip failed paths */
+		if (!pgpath->is_active)
+			continue;
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+			m->pg_init_in_progress++;
+	}
+}
+
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
 	m->current_pg = pgpath->pg;
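This helper consolidates the activation loop so it runs against m->current_pg and, unlike the open-coded loop it replaces below, skips paths already marked failed (!is_active). A hedged reminder of its calling convention, inferred from the caller in process_queued_ios(), which holds m->lock across the call:

	/* Sketch: callers hold m->lock; each activate_path work item
	 * that gets queued bumps pg_init_in_progress by one. */
	spin_lock_irqsave(&m->lock, flags);
	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);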
@@ -439,7 +454,7 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL, *tmp;
+	struct pgpath *pgpath = NULL;
 	unsigned must_queue = 1;
 	unsigned long flags;
 
@@ -457,14 +472,9 @@ static void process_queued_ios(struct work_struct *work)
 	    (!pgpath && !m->queue_if_no_path))
 		must_queue = 0;
 
-	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-		m->pg_init_count++;
-		m->pg_init_required = 0;
-		list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
-			if (queue_work(kmpath_handlerd, &tmp->activate_path))
-				m->pg_init_in_progress++;
-		}
-	}
+	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+		__pg_init_all_paths(m);
+
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 	if (!must_queue)
@@ -597,8 +607,8 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
-	r = dm_get_device(ti, shift(as), ti->begin, ti->len,
-			  dm_table_get_mode(ti->table), &p->path.dev);
+	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+			  &p->path.dev);
 	if (r) {
 		ti->error = "error getting device";
 		goto bad;
@@ -890,9 +900,34 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 	return r;
 }
 
-static void flush_multipath_work(void)
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	add_wait_queue(&m->pg_init_wait, &wait);
+
+	while (1) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		spin_lock_irqsave(&m->lock, flags);
+		if (!m->pg_init_in_progress) {
+			spin_unlock_irqrestore(&m->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&m->lock, flags);
+
+		io_schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
 {
 	flush_workqueue(kmpath_handlerd);
+	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_scheduled_work();
 }
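The new wait is the standard open-coded waitqueue pattern: register on pg_init_wait, mark the task TASK_UNINTERRUPTIBLE before testing the condition under m->lock (so a wake_up() arriving between the test and the sleep cannot be lost), then io_schedule() until pg_init_in_progress drains to zero. Ignoring the spinlocked condition read, it is conceptually the expansion of this one-liner, shown only for orientation:

	/* Hedged equivalent, not the patch's code: wait_event()
	 * expands to the same prepare/check/schedule loop, minus
	 * the explicit locking around the counter read. */
	wait_event(m->pg_init_wait, !m->pg_init_in_progress);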
@@ -901,7 +936,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = ti->private;
 
-	flush_multipath_work();
+	flush_multipath_work(m);
 	free_multipath(m);
 }
 
@@ -1128,8 +1163,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 
 static void pg_init_done(void *data, int errors)
 {
-	struct dm_path *path = data;
-	struct pgpath *pgpath = path_to_pgpath(path);
+	struct pgpath *pgpath = data;
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
 	unsigned long flags;
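Passing the pgpath itself as the callback cookie lets pg_init_done() drop the path_to_pgpath() recovery step; the matching change to the scsi_dh_activate() call site appears further down. A hedged before/after, assuming path_to_pgpath() was the usual container_of() wrapper around the embedded dm_path:

	/* old: recover the pgpath from the embedded struct dm_path */
	struct dm_path *path = data;
	struct pgpath *pgpath = container_of(path, struct pgpath, path);
	/* new: the cookie already is the pgpath */
	struct pgpath *pgpath = data;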
@@ -1143,8 +1177,8 @@ static void pg_init_done(void *data, int errors)
 			errors = 0;
 			break;
 		}
-		DMERR("Cannot failover device because scsi_dh_%s was not "
-		      "loaded.", m->hw_handler_name);
+		DMERR("Could not failover the device: Handler scsi_dh_%s "
+		      "Error %d.", m->hw_handler_name, errors);
 		/*
 		 * Fail path for now, so we do not ping pong
 		 */
@@ -1181,14 +1215,24 @@ static void pg_init_done(void *data, int errors)
 			m->current_pgpath = NULL;
 			m->current_pg = NULL;
 		}
-	} else if (!m->pg_init_required) {
-		m->queue_io = 0;
+	} else if (!m->pg_init_required)
 		pg->bypassed = 0;
-	}
 
-	m->pg_init_in_progress--;
-	if (!m->pg_init_in_progress)
-		queue_work(kmultipathd, &m->process_queued_ios);
+	if (--m->pg_init_in_progress)
+		/* Activations of other paths are still ongoing */
+		goto out;
+
+	if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	queue_work(kmultipathd, &m->process_queued_ios);
+
+	/*
+	 * Wake up any thread waiting to suspend.
+	 */
+	wake_up(&m->pg_init_wait);
+
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
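The reworked tail makes the completion handshake explicit: every finishing activation decrements pg_init_in_progress, and only the last one (the counter reaching zero) clears queue_io, re-kicks process_queued_ios, and wakes any suspender sleeping in multipath_wait_for_pg_init_completion(). Note that wake_up() runs with m->lock still held and the waiter re-checks the counter under the same lock, so a wakeup cannot slip between the waiter's test and its sleep. A condensed sketch of the two sides, for review only:

	/*
	 * Sketch of the handshake added above:
	 *
	 *   waker (pg_init_done)                 waiter (suspend path)
	 *   --------------------                 ---------------------
	 *   under m->lock:                       set_current_state(...);
	 *   if (--m->pg_init_in_progress == 0)   recheck counter under m->lock;
	 *           wake_up(&m->pg_init_wait);   io_schedule() if still nonzero
	 */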
@@ -1198,7 +1242,7 @@ static void activate_path(struct work_struct *work)
 		container_of(work, struct pgpath, activate_path);
 
 	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
-				pg_init_done, &pgpath->path);
+				pg_init_done, pgpath);
 }
 
 /*
@@ -1276,8 +1320,7 @@ static void multipath_postsuspend(struct dm_target *ti)
 	struct multipath *m = ti->private;
 
 	mutex_lock(&m->work_mutex);
-	m->suspended = 1;
-	flush_multipath_work();
+	flush_multipath_work(m);
 	mutex_unlock(&m->work_mutex);
 }
 
@@ -1289,10 +1332,6 @@ static void multipath_resume(struct dm_target *ti)
 	struct multipath *m = (struct multipath *) ti->private;
 	unsigned long flags;
 
-	mutex_lock(&m->work_mutex);
-	m->suspended = 0;
-	mutex_unlock(&m->work_mutex);
-
 	spin_lock_irqsave(&m->lock, flags);
 	m->queue_if_no_path = m->saved_queue_if_no_path;
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -1428,11 +1467,6 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
 
 	mutex_lock(&m->work_mutex);
 
-	if (m->suspended) {
-		r = -EBUSY;
-		goto out;
-	}
-
 	if (dm_suspended(ti)) {
 		r = -EBUSY;
 		goto out;
@@ -1471,8 +1505,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
 		goto out;
 	}
 
-	r = dm_get_device(ti, argv[1], ti->begin, ti->len,
-			  dm_table_get_mode(ti->table), &dev);
+	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
 	if (r) {
 		DMWARN("message: error getting device %s",
 		       argv[1]);