@@ -2104,19 +2104,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
 	/* prevent changes to the threadgroup list while we take a snapshot. */
 	read_lock(&tasklist_lock);
-	if (!thread_group_leader(leader)) {
-		/*
-		 * a race with de_thread from another thread's exec() may strip
-		 * us of our leadership, making while_each_thread unsafe to use
-		 * on this task. if this happens, there is no choice but to
-		 * throw this task away and try again (from cgroup_procs_write);
-		 * this is "double-double-toil-and-trouble-check locking".
-		 */
-		read_unlock(&tasklist_lock);
-		retval = -EAGAIN;
-		goto out_free_group_list;
-	}
-
 	tsk = leader;
 	i = 0;
 	do {
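The leadership re-check removed above does not disappear; it moves into attach_task_by_pid() (next hunk), where it runs under threadgroup_lock() and simply retries on failure instead of unwinding with -EAGAIN. As a user-space illustration of that "lookup, lock, re-check, retry" shape, here is a minimal sketch in plain C; lookup_leader(), group_lock, and the atomics are hypothetical stand-ins for find_task_by_vpid(), threadgroup_lock(), and RCU, not kernel interfaces:

/*
 * Minimal user-space sketch of the double-check pattern, assuming
 * some other thread may swap 'leader' at any time (the exec() race).
 * All names here are hypothetical stand-ins, not kernel APIs.
 */
#include <pthread.h>
#include <stdio.h>

struct task {
	int pid;
};

static struct task initial = { .pid = 100 };
static struct task *leader = &initial;	/* may be swapped concurrently */
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/* optimistic, unlocked lookup: stands in for find_task_by_vpid() */
static struct task *lookup_leader(void)
{
	return __atomic_load_n(&leader, __ATOMIC_ACQUIRE);
}

static int attach(void)
{
	struct task *tsk;

retry:
	tsk = lookup_leader();

	pthread_mutex_lock(&group_lock);	/* threadgroup_lock() stand-in */
	if (tsk != __atomic_load_n(&leader, __ATOMIC_ACQUIRE)) {
		/* lost a race: a new leader appeared; start over */
		pthread_mutex_unlock(&group_lock);
		goto retry;
	}
	/* leadership is stable while group_lock is held: walk the group */
	printf("attached group led by pid %d\n", tsk->pid);
	pthread_mutex_unlock(&group_lock);
	return 0;
}

int main(void)
{
	return attach();
}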
@@ -2245,22 +2232,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 	if (!cgroup_lock_live_group(cgrp))
 		return -ENODEV;
 
+retry_find_task:
+	rcu_read_lock();
 	if (pid) {
-		rcu_read_lock();
 		tsk = find_task_by_vpid(pid);
 		if (!tsk) {
 			rcu_read_unlock();
-			cgroup_unlock();
-			return -ESRCH;
-		}
-		if (threadgroup) {
-			/*
-			 * RCU protects this access, since tsk was found in the
-			 * tid map. a race with de_thread may cause group_leader
-			 * to stop being the leader, but cgroup_attach_proc will
-			 * detect it later.
-			 */
-			tsk = tsk->group_leader;
+			ret = -ESRCH;
+			goto out_unlock_cgroup;
 		}
 		/*
 		 * even if we're attaching all tasks in the thread group, we
@@ -2271,29 +2250,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 		    cred->euid != tcred->uid &&
 		    cred->euid != tcred->suid) {
 			rcu_read_unlock();
-			cgroup_unlock();
-			return -EACCES;
+			ret = -EACCES;
+			goto out_unlock_cgroup;
 		}
-		get_task_struct(tsk);
-		rcu_read_unlock();
-	} else {
-		if (threadgroup)
-			tsk = current->group_leader;
-		else
-			tsk = current;
-		get_task_struct(tsk);
-	}
-
-	threadgroup_lock(tsk);
+	} else
+		tsk = current;
 
 	if (threadgroup)
+		tsk = tsk->group_leader;
+	get_task_struct(tsk);
+	rcu_read_unlock();
+
+	threadgroup_lock(tsk);
+	if (threadgroup) {
+		if (!thread_group_leader(tsk)) {
+			/*
+			 * a race with de_thread from another thread's exec()
+			 * may strip us of our leadership, if this happens,
+			 * there is no choice but to throw this task away and
+			 * try again; this is
+			 * "double-double-toil-and-trouble-check locking".
+			 */
+			threadgroup_unlock(tsk);
+			put_task_struct(tsk);
+			goto retry_find_task;
+		}
 		ret = cgroup_attach_proc(cgrp, tsk);
-	else
+	} else
 		ret = cgroup_attach_task(cgrp, tsk);
-
 	threadgroup_unlock(tsk);
 
 	put_task_struct(tsk);
+out_unlock_cgroup:
 	cgroup_unlock();
 	return ret;
 }
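A second detail in the hunk above: get_task_struct() is now called before rcu_read_unlock() on every path. That is the canonical rule for task lookups; RCU only keeps the task_struct alive inside the read-side critical section, so the reference must be taken before leaving it. Shown in isolation (kernel context, not a standalone program), the pattern is:

	/* canonical RCU task-lookup pattern, shown in isolation */
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);	/* pin before leaving the RCU section */
	rcu_read_unlock();
	/* if non-NULL, tsk stays valid until put_task_struct(tsk) */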
@@ -2305,16 +2293,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
 
 static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
 {
-	int ret;
-	do {
-		/*
-		 * attach_proc fails with -EAGAIN if threadgroup leadership
-		 * changes in the middle of the operation, in which case we need
-		 * to find the task_struct for the new leader and start over.
-		 */
-		ret = attach_task_by_pid(cgrp, tgid, true);
-	} while (ret == -EAGAIN);
-	return ret;
+	return attach_task_by_pid(cgrp, tgid, true);
 }
 
 /**
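For context, cgroup_procs_write() runs when user space writes a TGID into a cgroup's cgroup.procs file (cgroup_tasks_write() and the tasks file are the single-task variant). A minimal caller looks like the following sketch; the mount point and group name are assumptions about the local setup:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path is an assumption: wherever the cgroup hierarchy is mounted */
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/cgroup.procs", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* writing a TGID here ends up in cgroup_procs_write() above */
	fprintf(f, "%d\n", (int)getpid());
	fclose(f);
	return 0;
}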