@@ -2320,14 +2320,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Called from:
+ * Gets called from 3 sites (exec, fork, wakeup), since it is called without
+ * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
+ * by:
  *
- *  - fork, @p is stable because it isn't on the tasklist yet
- *
- *  - exec, @p is unstable, retry loop
- *
- *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
- *             we should be good.
+ *  exec:           is unstable, retry loop
+ *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
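
A note for readers following the new comment: the "serialize ->cpus_allowed
against TASK_WAKING" scheme is what the set_cpus_allowed_ptr() hunk at the
bottom of this patch implements. A minimal sketch of that shape, where
stable() is a hypothetical stand-in for the real state/flags test (it is not
a kernel function):

	struct rq *rq;
	unsigned long flags;

again:
	while (!stable(p))			/* optimistic spin, lock-free */
		cpu_relax();

	rq = task_rq_lock(p, &flags);
	if (!stable(p)) {			/* lost the race while locking */
		task_rq_unlock(rq, &flags);
		goto again;
	}
	/* from here on ->cpus_allowed is stable under rq->lock */
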
@@ -2620,9 +2618,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
-#ifdef CONFIG_SMP
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
 	set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
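
Note that sched_fork() still calls set_task_cpu(p, cpu) here; in the
2.6.32-era code this function is bracketed by get_cpu()/put_cpu() outside
this hunk, so cpu is the parent's current cpu. A hedged sketch of the
resulting shape, reconstructed from context rather than quoted from the
tree:

	void sched_fork(struct task_struct *p, int clone_flags)
	{
		int cpu = get_cpu();	/* provisional: the parent's cpu */
		/* ... */
		set_task_cpu(p, cpu);	/* real placement deferred to wakeup */
		/* ... */
		put_cpu();
	}

The actual SD_BALANCE_FORK selection now happens exactly once, later, in
wake_up_new_task() below.
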
@@ -2652,6 +2647,21 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
+	int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Fork balancing, do it here and not earlier because:
+	 *  - cpus_allowed can change in the fork path
+	 *  - any previously selected cpu might disappear through hotplug
+	 *
+	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
+	 * ->cpus_allowed is stable, we have preemption disabled, meaning
+	 * cpu_online_mask is stable.
+	 */
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	set_task_cpu(p, cpu);
+#endif
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_WAKING);
@@ -2665,6 +2675,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, &flags);
+	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
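
The get_cpu()/put_cpu() pair added above is load-bearing: it keeps
preemption disabled across select_task_rq(), and with stop_machine-based
hotplug a cpu cannot finish going offline while any cpu runs with
preemption off, which is exactly the "cpu_online_mask is stable" claim in
the new comment. A minimal sketch of the bracket, using only the stock
kernel primitives seen in this patch:

	int cpu = get_cpu();	/* returns this cpu, disables preemption */

	/*
	 * Nothing can complete cpu hotplug while we sit inside this
	 * bracket, so a cpu picked out of cpu_online_mask here remains
	 * online at least until put_cpu().
	 */
	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
	set_task_cpu(p, cpu);

	put_cpu();		/* re-enables preemption */
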
@@ -7139,14 +7150,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	 * the ->cpus_allowed mask from under waking tasks, which would be
 	 * possible when we change rq->lock in ttwu(), so synchronize against
 	 * TASK_WAKING to avoid that.
+	 *
+	 * Make an exception for freshly cloned tasks, since cpuset namespaces
+	 * might move the task about, we have to validate the target in
+	 * wake_up_new_task() anyway since the cpu might have gone away.
 	 */
 again:
-	while (p->state == TASK_WAKING)
+	while (p->state == TASK_WAKING && !(p->flags & PF_STARTING))
 		cpu_relax();
 
 	rq = task_rq_lock(p, &flags);
 
-	if (p->state == TASK_WAKING) {
+	if (p->state == TASK_WAKING && !(p->flags & PF_STARTING)) {
 		task_rq_unlock(rq, &flags);
 		goto again;
 	}
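
The compound condition now appears twice in set_cpus_allowed_ptr(). Purely
as an illustration (this helper is not part of the patch), it reads as:

	/*
	 * Must we wait this task out before touching ->cpus_allowed?
	 * PF_STARTING marks a freshly cloned task that has never been
	 * woken; those are exempt because wake_up_new_task() re-validates
	 * the target cpu anyway.
	 */
	static inline bool must_wait_for_waking(struct task_struct *p)
	{
		return p->state == TASK_WAKING && !(p->flags & PF_STARTING);
	}

with which the spin loop becomes "while (must_wait_for_waking(p))
cpu_relax();" and the locked re-check becomes
"if (must_wait_for_waking(p)) { ... goto again; }".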