|
@@ -5160,8 +5160,16 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
|
|
|
|
|
|
/* No more Mr. Nice Guy. */
|
|
|
if (dest_cpu == NR_CPUS) {
|
|
|
+ cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
|
|
|
+ /*
|
|
|
+ * Try to stay on the same cpuset, where the
|
|
|
+ * current cpuset may be a subset of all cpus.
|
|
|
+ * The cpuset_cpus_allowed_locked() variant of
|
|
|
+ * cpuset_cpus_allowed() will not block. It must be
|
|
|
+ * called within calls to cpuset_lock/cpuset_unlock.
|
|
|
+ */
|
|
|
rq = task_rq_lock(p, &flags);
|
|
|
- cpus_setall(p->cpus_allowed);
|
|
|
+ p->cpus_allowed = cpus_allowed;
|
|
|
dest_cpu = any_online_cpu(p->cpus_allowed);
|
|
|
task_rq_unlock(rq, &flags);
|
|
|
|
|
@@ -5527,6 +5535,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
|
|
|
|
|
case CPU_DEAD:
|
|
|
case CPU_DEAD_FROZEN:
|
|
|
+		cpuset_lock(); /* around calls to cpuset_cpus_allowed_locked() */
|
|
|
migrate_live_tasks(cpu);
|
|
|
rq = cpu_rq(cpu);
|
|
|
kthread_stop(rq->migration_thread);
|
|
@@ -5540,6 +5549,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
|
|
rq->idle->sched_class = &idle_sched_class;
|
|
|
migrate_dead_tasks(cpu);
|
|
|
spin_unlock_irq(&rq->lock);
|
|
|
+ cpuset_unlock();
|
|
|
migrate_nr_uninterruptible(rq);
|
|
|
BUG_ON(rq->nr_running != 0);
|
|
|
|