@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
  *
  * sched_move_task() holds both and thus holding either pins the cgroup,
- * see set_task_rq().
+ * see task_group().
  *
  * Furthermore, all task_rq users should acquire both locks, see
  * task_rq_lock().
@@ -7658,6 +7658,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+	struct task_group *tg;
 	int on_rq, running;
 	unsigned long flags;
 	struct rq *rq;
@@ -7672,6 +7673,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
+	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+				lockdep_is_held(&tsk->sighand->siglock)),
+			  struct task_group, css);
+	tg = autogroup_task_group(tsk, tg);
+	tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
 		tsk->sched_class->task_move_group(tsk, on_rq);