@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * p->pi_lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct cgroup_subsys_state *css;
 
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock));
+			lockdep_is_held(&p->pi_lock) ||
+			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
 
 	return autogroup_task_group(p, tg);
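The scheme the new comment describes is worth spelling out: the writer (cpu_cgroup_attach(), which ends up in sched_move_task()) takes *both* p->pi_lock and rq->lock before repointing the task's group, so a reader holding *either* one of them sees a stable group. Below is a minimal userspace sketch of the pattern, assuming nothing from the kernel: pthread mutexes stand in for the raw locks, and task_t/attach_group/read_group_pi are invented names.

/* Toy model of "hold either lock to pin, hold both locks to change". */
#include <pthread.h>
#include <stdio.h>

struct group { const char *name; };

struct task_t {
	pthread_mutex_t pi_lock;	/* stands in for p->pi_lock */
	pthread_mutex_t rq_lock;	/* stands in for task_rq(p)->lock */
	struct group *grp;
};

/* Writer side (cf. sched_move_task()): take BOTH locks before moving
 * the task, so no reader can be mid-observation under either one. */
static void attach_group(struct task_t *t, struct group *g)
{
	pthread_mutex_lock(&t->pi_lock);
	pthread_mutex_lock(&t->rq_lock);
	t->grp = g;
	pthread_mutex_unlock(&t->rq_lock);
	pthread_mutex_unlock(&t->pi_lock);
}

/* Reader side: EITHER lock pins the group, because the writer cannot
 * acquire both while we hold this one. */
static struct group *read_group_pi(struct task_t *t)
{
	struct group *g;

	pthread_mutex_lock(&t->pi_lock);
	g = t->grp;
	pthread_mutex_unlock(&t->pi_lock);
	return g;
}

int main(void)
{
	struct group a = { "A" }, b = { "B" };
	struct task_t t = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, &a
	};

	printf("group: %s\n", read_group_pi(&t)->name);
	attach_group(&t, &b);
	printf("group: %s\n", read_group_pi(&t)->name);
	return 0;
}

The writer pays for two acquisitions, but moving a task between cgroups is rare next to wakeups and scheduling, which is exactly the trade the patch wants.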
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+	/*
+	 * The caller should hold either p->pi_lock or rq->lock when changing
+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+	 *
+	 * sched_move_task() holds both and thus holding either pins the cgroup,
+	 * see set_task_rq().
+	 *
+	 * Furthermore, all task_rq users should acquire both locks, see
+	 * task_rq_lock().
+	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
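lockdep_is_held() only exists under CONFIG_LOCKDEP, hence the #ifdef; the debug_locks test matters because lockdep clears it after its first report, and lock-held answers are not trustworthy from then on. As a rough illustration of the "caller must hold at least one of two locks" assertion, here is a userspace approximation that tracks held locks in thread-local flags; all names (lock_pi, set_cpu_checked, ...) are invented, and this is not how lockdep is actually implemented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Poor man's lockdep state: which of the two locks this thread holds. */
static __thread bool holds_pi, holds_rq;

static void lock_pi(void)   { pthread_mutex_lock(&pi_lock);  holds_pi = true; }
static void unlock_pi(void) { holds_pi = false; pthread_mutex_unlock(&pi_lock); }
static void lock_rq(void)   { pthread_mutex_lock(&rq_lock);  holds_rq = true; }
static void unlock_rq(void) { holds_rq = false; pthread_mutex_unlock(&rq_lock); }

static int task_cpu_nr;

/* Analogue of the WARN_ON_ONCE() in set_task_cpu(): complain, once, if
 * the caller holds neither lock. */
static void set_cpu_checked(int cpu)
{
	static bool warned;

	if (!holds_pi && !holds_rq && !warned) {
		warned = true;
		fprintf(stderr, "set_cpu_checked(): neither lock held!\n");
	}
	task_cpu_nr = cpu;
}

int main(void)
{
	lock_pi();
	set_cpu_checked(1);	/* ok: pi side held (waking task) */
	unlock_pi();

	lock_rq();
	set_cpu_checked(0);	/* ok: rq side held (runnable task) */
	unlock_rq();

	set_cpu_checked(1);	/* fires the warning */
	return 0;
}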
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 		}
 		rcu_read_unlock();
 	}
+
+	if (wake_flags & WF_MIGRATED)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
 	schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	if (wake_flags & WF_SYNC)
 		schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-	if (cpu != task_cpu(p))
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
 
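Together with the earlier ttwu_stat() hunk, this fixes the nr_wakeups_migrate accounting: ttwu_stat() runs after set_task_cpu(), so by then task_cpu(p) already equals the chosen cpu, the old cpu != task_cpu(p) test was always false, and the counter never moved. The migration is now recorded where the decision is made and handed down in wake_flags (see the try_to_wake_up() hunk below). A compact toy of the record-before-overwrite pattern, with invented names apart from WF_MIGRATED itself:

#include <stdio.h>

#define WF_MIGRATED 0x04	/* value matching the new wake flag */

static int task_cpu_nr = 0;
static long nr_wakeups_migrate;

/* Stand-in for ttwu_stat(): by now task_cpu_nr is already updated, so
 * only the flag can tell us whether a migration happened. */
static void ttwu_stat_toy(int wake_flags)
{
	if (wake_flags & WF_MIGRATED)
		nr_wakeups_migrate++;
}

/* Stand-in for try_to_wake_up(): record the migration *before* the
 * state that proves it is overwritten. */
static void wake_up_toy(int chosen_cpu)
{
	int wake_flags = 0;

	if (task_cpu_nr != chosen_cpu) {
		wake_flags |= WF_MIGRATED;
		task_cpu_nr = chosen_cpu;
	}
	ttwu_stat_toy(wake_flags);
}

int main(void)
{
	wake_up_toy(1);		/* migrates: 0 -> 1 */
	wake_up_toy(1);		/* stays put */
	printf("migrations: %ld\n", nr_wakeups_migrate);	/* prints 1 */
	return 0;
}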
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
 	}
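With TTWU_QUEUE the remainder of the wakeup runs on the remote CPU, which will timestamp the task with its own sched_clock_cpu() view; on hardware without synchronized TSCs that view can lag the waker's. Calling sched_clock_cpu(cpu) for the target first pulls the two views together, so the remote enqueue does not operate on a stale clock. A toy reduction of the idea to "never let an observed clock run backwards" (illustrative only; the real sched_clock_cpu() does far more careful filtering):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 2

/* Per-"cpu" clocks; cpu 1 lags behind in this example. */
static uint64_t cpu_clock_ns[NR_CPUS] = { 1000, 400 };

/* Stand-in for the sync: reading a remote clock also pulls it forward
 * to at least the time the caller has already observed locally. */
static uint64_t sched_clock_cpu_toy(int cpu, uint64_t local_now)
{
	if (cpu_clock_ns[cpu] < local_now)
		cpu_clock_ns[cpu] = local_now;
	return cpu_clock_ns[cpu];
}

int main(void)
{
	uint64_t local_now = cpu_clock_ns[0];

	printf("remote before sync: %llu\n", (unsigned long long)cpu_clock_ns[1]);
	sched_clock_cpu_toy(1, local_now);	/* sync clocks x-cpu */
	printf("remote after sync:  %llu\n", (unsigned long long)cpu_clock_ns[1]);
	return 0;
}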
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		p->sched_class->task_waking(p);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (task_cpu(p) != cpu)
+	if (task_cpu(p) != cpu) {
+		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
+	}
 #endif /* CONFIG_SMP */
 
 	ttwu_queue(p, cpu);
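Finally, the flag is set at the one point where the migration is actually known, before set_task_cpu() overwrites the evidence. Note the design economy: rather than growing the ttwu path with another parameter, the bit rides in wake_flags, which try_to_wake_up() already passes down to ttwu_stat(), so no function signatures change and code that does not care about migrations simply ignores it.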