@@ -216,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 }

 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 {
-	p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
-	p->se.parent = task_group(p)->se[task_cpu(p)];
+	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+	p->se.parent = task_group(p)->se[cpu];
 }

 #else

-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }

 #endif	/* CONFIG_FAIR_GROUP_SCHED */

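The point of the new "cpu" argument: the caller can now install the group
pointers for the destination CPU before that CPU number is published via
task_thread_info(p)->cpu. A rough sketch of the window the old ordering left
open (an illustrative interleaving, not part of the patch; the identifiers
are the ones used in kernel/sched.c of this era):

/*
 *  CPU0: __set_task_cpu(p, new_cpu)      CPU1: task_rq_lock(p, &flags)
 *  ---------------------------------     ---------------------------------
 *  task_thread_info(p)->cpu = new_cpu;
 *                                        rq = cpu_rq(task_cpu(p));  new rq!
 *                                        spin_lock(&rq->lock); recheck passes
 *  set_task_cfs_rq(p);  too late: CPU1 may already be walking p->se.cfs_rq,
 *                       which still points at the old CPU's per-group cfs_rq.
 */
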
@@ -1022,10 +1022,16 @@ unsigned long weighted_cpuload(const int cpu)

 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
+	set_task_cfs_rq(p, cpu);
 #ifdef CONFIG_SMP
+	/*
+	 * Once ->cpu is set to its new value, task_rq_lock(p, ...) can be
+	 * successfully executed on another CPU. We must make sure that all
+	 * updates of per-task data have completed by then.
+	 */
+	smp_wmb();
 	task_thread_info(p)->cpu = cpu;
 #endif
-	set_task_cfs_rq(p);
 }

 #ifdef CONFIG_SMP

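The barrier pairs with the locked recheck in task_rq_lock(). For context,
that helper looks roughly like this in kernel/sched.c of this era (a sketch
for illustration, not part of the patch; details differ between versions):

static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(rq->lock)
{
	struct rq *rq;

repeat_lock_task:
	local_irq_save(*flags);
	rq = task_rq(p);	/* task_rq(p) is cpu_rq(task_cpu(p)) */
	spin_lock(&rq->lock);
	if (unlikely(rq != task_rq(p))) {
		/* p migrated between the read and the lock; try again */
		spin_unlock_irqrestore(&rq->lock, *flags);
		goto repeat_lock_task;
	}
	return rq;
}

The intent is that the smp_wmb() orders the stores to p->se.cfs_rq and
p->se.parent before the publication of the new ->cpu, so a CPU that locks
the new rq and passes the recheck never sees a half-updated task.
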
@@ -7088,7 +7094,7 @@ void sched_move_task(struct task_struct *tsk)
 	rq = task_rq_lock(tsk, &flags);

 	if (tsk->sched_class != &fair_sched_class) {
-		set_task_cfs_rq(tsk);
+		set_task_cfs_rq(tsk, task_cpu(tsk));
 		goto done;
 	}

@@ -7103,7 +7109,7 @@ void sched_move_task(struct task_struct *tsk)
 		tsk->sched_class->put_prev_task(rq, tsk);
 	}

-	set_task_cfs_rq(tsk);
+	set_task_cfs_rq(tsk, task_cpu(tsk));

 	if (on_rq) {
 		if (unlikely(running))
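
In sched_move_task() the task's runqueue lock is held, so task_cpu(tsk)
cannot change underneath us; passing it explicitly preserves the old
behavior at these two call sites, and the extra parameter only matters on
the __set_task_cpu() path. The ordering the patch establishes, in summary
(an illustrative comment, not taken from the patch):

/*
 * writer: __set_task_cpu(p, cpu)        reader: task_rq_lock(p, ...)
 *   p->se.cfs_rq = group cfs_rq[cpu];     rq = cpu_rq(task_cpu(p));
 *   p->se.parent = group se[cpu];         spin_lock(&rq->lock);
 *   smp_wmb();                            recheck rq == task_rq(p);
 *   task_thread_info(p)->cpu = cpu;       safely use p->se.cfs_rq etc.
 */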