@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
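
For reference, a minimal sketch (hypothetical names, not part of the patch) of the rule CONFIG_PROVE_RCU enforces here: rcu_dereference() verifies via lockdep that the caller holds an RCU read-side lock, so calling it outside rcu_read_lock()/rcu_read_unlock() triggers a splat even when, as with task_group(), the object cannot actually go away:

/*
 * Hypothetical example, assuming an RCU-protected pointer named
 * global_foo; illustrates the pattern only, not the patch itself.
 */
#include <linux/rcupdate.h>

struct foo {
	int val;
};

static struct foo __rcu *global_foo;

static int read_foo_val(void)
{
	struct foo *f;
	int val = -1;

	rcu_read_lock();			/* marks the read-side critical section */
	f = rcu_dereference(global_foo);	/* PROVE_RCU-clean only inside it */
	if (f)
		val = f->val;
	rcu_read_unlock();

	return val;
}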