@@ -2573,7 +2573,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 	if (!next)
 		smp_send_reschedule(cpu);
 }
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+	struct rq *rq;
+	int ret = 0;
+
+	rq = __task_rq_lock(p);
+	if (p->on_cpu) {
+		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+		ttwu_do_wakeup(rq, p, wake_flags);
+		ret = 1;
+	}
+	__task_rq_unlock(rq);
+
+	return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
@@ -2631,17 +2650,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 		/*
-		 * If called from interrupt context we could have landed in the
-		 * middle of schedule(), in this case we should take care not
-		 * to spin on ->on_cpu if p is current, since that would
-		 * deadlock.
+		 * In case the architecture enables interrupts in
+		 * context_switch(), we cannot busy wait, since that
+		 * would lead to deadlocks when an interrupt hits and
+		 * tries to wake up @prev. So bail and do a complete
+		 * remote wakeup.
 		 */
-		if (p == current) {
-			ttwu_queue(p, cpu);
+		if (ttwu_activate_remote(p, wake_flags))
 			goto stat;
-		}
-#endif
+#else
 		cpu_relax();
+#endif
 	}
 	/*
 	 * Pairs with the smp_wmb() in finish_lock_switch().
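
The shape of the new helper can be illustrated with a small userspace sketch (not kernel code; every toy_* name below is invented for the illustration, and a pthread mutex stands in for the run-queue lock): instead of busy-waiting for ->on_cpu to drop, the waker takes the lock and, if the task is still mid context switch, completes the wakeup in place.

/* Build with: cc -pthread toy_ttwu.c */
#include <pthread.h>
#include <stdio.h>

enum { TOY_TASK_SLEEPING, TOY_TASK_RUNNING };

struct toy_task {
	pthread_mutex_t rq_lock;	/* stands in for __task_rq_lock() */
	int on_cpu;			/* still finishing a context switch? */
	int state;
};

/* Rough analogue of ttwu_activate_remote(): returns 1 if it did the wakeup. */
static int toy_activate_remote(struct toy_task *p)
{
	int ret = 0;

	pthread_mutex_lock(&p->rq_lock);
	if (p->on_cpu) {
		p->state = TOY_TASK_RUNNING;	/* "activate" + "do wakeup" */
		ret = 1;
	}
	pthread_mutex_unlock(&p->rq_lock);

	return ret;
}

int main(void)
{
	struct toy_task t = {
		.rq_lock = PTHREAD_MUTEX_INITIALIZER,
		.on_cpu  = 1,
		.state   = TOY_TASK_SLEEPING,
	};

	if (toy_activate_remote(&t))
		printf("woke task in place, state=%d\n", t.state);
	return 0;
}

The point of the real change is the one the new comment makes: with __ARCH_WANT_INTERRUPTS_ON_CTXSW an interrupt can arrive while @prev still has ->on_cpu set, so spinning on that bit from the interrupt would deadlock; finishing the wakeup under the run-queue lock avoids the wait entirely.
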
@@ -5841,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5948,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
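
The dispatch-or-fallback pattern that do_set_cpus_allowed() factors out (use the scheduling class hook when one exists, otherwise update the generic affinity fields) can be mimicked in a self-contained toy program; everything below is invented for the illustration and is not kernel code.

#include <stdio.h>

struct toy_cpumask { unsigned long bits; };

struct toy_task;

struct toy_sched_class {
	/* Optional hook, analogous to sched_class->set_cpus_allowed. */
	void (*set_cpus_allowed)(struct toy_task *p, const struct toy_cpumask *mask);
};

struct toy_task {
	const struct toy_sched_class *sched_class;
	struct toy_cpumask cpus_allowed;
	int nr_cpus_allowed;
};

static int toy_cpumask_weight(const struct toy_cpumask *mask)
{
	return __builtin_popcountl(mask->bits);	/* number of set bits */
}

/* Mirrors the new helper: prefer the class hook, else do the generic update. */
static void toy_do_set_cpus_allowed(struct toy_task *p, const struct toy_cpumask *mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, mask);
	else {
		p->cpus_allowed = *mask;
		p->nr_cpus_allowed = toy_cpumask_weight(mask);
	}
}

int main(void)
{
	struct toy_task t = { .sched_class = NULL };
	struct toy_cpumask mask = { .bits = 0x5 };	/* CPUs 0 and 2 */

	toy_do_set_cpus_allowed(&t, &mask);
	printf("nr_cpus_allowed = %d\n", t.nr_cpus_allowed);	/* prints 2 */
	return 0;
}

Pulling the logic into one helper is what lets init_idle() above and set_cpus_allowed_ptr() below share it instead of open-coding the same if/else.
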
@@ -5974,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))