@@ -1762,7 +1762,7 @@ EXPORT_SYMBOL_GPL(cgroup_path);
  *
  * 'guarantee' is set if the caller promises that a new css_set for the task
  * will already exist. If not set, this function might sleep, and can fail with
- * -ENOMEM. Otherwise, it can only fail with -ESRCH.
+ * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
  */
 static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 			       struct task_struct *tsk, bool guarantee)
@@ -1800,13 +1800,9 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	}
 	put_css_set(oldcg);
 
-	/* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
+	/* @tsk can't exit as its threadgroup is locked */
 	task_lock(tsk);
-	if (tsk->flags & PF_EXITING) {
-		task_unlock(tsk);
-		put_css_set(newcg);
-		return -ESRCH;
-	}
+	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	rcu_assign_pointer(tsk->cgroups, newcg);
 	task_unlock(tsk);
 
@@ -1832,8 +1828,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
  * @cgrp: the cgroup the task is attaching to
  * @tsk: the task to be attached
  *
- * Call holding cgroup_mutex. May take task_lock of
- * the task 'tsk' during call.
+ * Call with cgroup_mutex and threadgroup locked. May take task_lock of
+ * @tsk during call.
  */
 int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
@@ -1842,6 +1838,10 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	struct cgroup *oldcgrp;
 	struct cgroupfs_root *root = cgrp->root;
 
+	/* @tsk either already exited or can't exit until the end */
+	if (tsk->flags & PF_EXITING)
+		return -ESRCH;
+
 	/* Nothing to do if the task is already in that cgroup */
 	oldcgrp = task_cgroup_from_root(tsk, root);
 	if (cgrp == oldcgrp)
@@ -2062,6 +2062,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	tsk = leader;
 	i = 0;
 	do {
+		/* @tsk either already exited or can't exit until the end */
+		if (tsk->flags & PF_EXITING)
+			continue;
+
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
 		get_task_struct(tsk);
@@ -2116,11 +2120,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			continue;
 		/* get old css_set pointer */
 		task_lock(tsk);
-		if (tsk->flags & PF_EXITING) {
-			/* ignore this task if it's going away */
-			task_unlock(tsk);
-			continue;
-		}
 		oldcg = tsk->cgroups;
 		get_css_set(oldcg);
 		task_unlock(tsk);
@@ -2153,16 +2152,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		oldcgrp = task_cgroup_from_root(tsk, root);
 		if (cgrp == oldcgrp)
 			continue;
-		/* if the thread is PF_EXITING, it can just get skipped. */
 		retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
-		if (retval == 0) {
-			/* attach each task to each subsystem */
-			for_each_subsys(root, ss) {
-				if (ss->attach_task)
-					ss->attach_task(cgrp, tsk);
-			}
-		} else {
-			BUG_ON(retval != -ESRCH);
+		BUG_ON(retval);
+		/* attach each task to each subsystem */
+		for_each_subsys(root, ss) {
+			if (ss->attach_task)
+				ss->attach_task(cgrp, tsk);
 		}
 	}
 	/* nothing is sensitive to fork() after this point. */
@@ -2215,8 +2210,8 @@ out_free_group_list:
 
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
- * function to attach either it or all tasks in its threadgroup. Will take
- * cgroup_mutex; may take task_lock of task.
+ * function to attach either it or all tasks in its threadgroup. Will lock
+ * cgroup_mutex and threadgroup; may take task_lock of task.
  */
 static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 {
@@ -2243,11 +2238,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 			 * detect it later.
 			 */
 			tsk = tsk->group_leader;
-		} else if (tsk->flags & PF_EXITING) {
-			/* optimization for the single-task-only case */
-			rcu_read_unlock();
-			cgroup_unlock();
-			return -ESRCH;
 		}
 		/*
 		 * even if we're attaching all tasks in the thread group, we
@@ -2271,13 +2261,15 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 		get_task_struct(tsk);
 	}
 
-	if (threadgroup) {
-		threadgroup_lock(tsk);
+	threadgroup_lock(tsk);
+
+	if (threadgroup)
 		ret = cgroup_attach_proc(cgrp, tsk);
-		threadgroup_unlock(tsk);
-	} else {
+	else
 		ret = cgroup_attach_task(cgrp, tsk);
-	}
+
+	threadgroup_unlock(tsk);
+
 	put_task_struct(tsk);
 	cgroup_unlock();
 	return ret;