@@ -606,6 +606,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
+	if (p->flags & PF_EXITING)
+		return &root_task_group;
+
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
@@ -8880,6 +8883,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	}
 }
 
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	/*
+	 * cgroup_exit() is called in the copy_process() failure path.
+	 * Ignore this case, since the task hasn't run yet; this avoids
+	 * trying to poke half-freed task state from generic code.
+	 */
+	if (!(task->flags & PF_EXITING))
+		return;
+
+	sched_move_task(task);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
@@ -8952,6 +8969,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.destroy	= cpu_cgroup_destroy,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,