@@ -2027,7 +2027,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		goto out_free_group_list;
 
 	/* prevent changes to the threadgroup list while we take a snapshot. */
-	rcu_read_lock();
+	read_lock(&tasklist_lock);
 	if (!thread_group_leader(leader)) {
 		/*
 		 * a race with de_thread from another thread's exec() may strip
@@ -2036,7 +2036,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		 * throw this task away and try again (from cgroup_procs_write);
 		 * this is "double-double-toil-and-trouble-check locking".
 		 */
-		rcu_read_unlock();
+		read_unlock(&tasklist_lock);
 		retval = -EAGAIN;
 		goto out_free_group_list;
 	}
@@ -2057,7 +2057,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
 	group_size = i;
-	rcu_read_unlock();
+	read_unlock(&tasklist_lock);
 
 	/*
 	 * step 1: check that we can legitimately attach to the cgroup.