@@ -2003,8 +2003,8 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  * @cgrp: the cgroup to attach to
  * @leader: the threadgroup leader task_struct of the group to be attached
  *
- * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
- * take task_lock of each thread in leader's threadgroup individually in turn.
+ * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
+ * task_lock of each thread in leader's threadgroup individually in turn.
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
@@ -2030,8 +2030,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * step 0: in order to do expensive, possibly blocking operations for
 	 * every thread, we cannot iterate the thread group list, since it needs
 	 * rcu or tasklist locked. instead, build an array of all threads in the
-	 * group - threadgroup_fork_lock prevents new threads from appearing,
-	 * and if threads exit, this will just be an over-estimate.
+	 * group - group_rwsem prevents new threads from appearing, and if
+	 * threads exit, this will just be an over-estimate.
 	 */
 	group_size = get_nr_threads(leader);
 	/* flex_array supports very large thread-groups better than kmalloc. */
@@ -2249,7 +2249,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 			cgroup_unlock();
 			return -ESRCH;
 		}
-
 		/*
 		 * even if we're attaching all tasks in the thread group, we
 		 * only need to check permissions on one of them.
@@ -2273,9 +2272,9 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 	}
 
 	if (threadgroup) {
-		threadgroup_fork_write_lock(tsk);
+		threadgroup_lock(tsk);
 		ret = cgroup_attach_proc(cgrp, tsk);
-		threadgroup_fork_write_unlock(tsk);
+		threadgroup_unlock(tsk);
 	} else {
 		ret = cgroup_attach_task(cgrp, tsk);
 	}