@@ -2102,10 +2102,14 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	if (retval)
 		goto out_free_group_list;
 
-	/* prevent changes to the threadgroup list while we take a snapshot. */
-	read_lock(&tasklist_lock);
 	tsk = leader;
 	i = 0;
+	/*
+	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
+	 * already PF_EXITING could be freed from underneath us unless we
+	 * take an rcu_read_lock.
+	 */
+	rcu_read_lock();
 	do {
 		struct task_and_cgroup ent;
 
@@ -2128,11 +2132,11 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		BUG_ON(retval != 0);
 		i++;
 	} while_each_thread(leader, tsk);
+	rcu_read_unlock();
 	/* remember the number of threads in the array for later. */
 	group_size = i;
 	tset.tc_array = group;
 	tset.tc_array_len = group_size;
-	read_unlock(&tasklist_lock);
 
 	/* methods shouldn't be called if no task is actually migrating */
 	retval = 0;
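For readers skimming the hunks, the snapshot loop ends up shaped roughly as below once the patch is applied. This is a sketch stitched together from the context lines above, with the body of the thread walk elided, so treat it as an illustration of the new locking shape rather than a verbatim excerpt of cgroup_attach_proc():

	tsk = leader;
	i = 0;
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	rcu_read_lock();
	do {
		struct task_and_cgroup ent;

		/* ... build the snapshot entry for @tsk (elided) ... */

		i++;
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
	/* remember the number of threads in the array for later. */
	group_size = i;
	tset.tc_array = group;
	tset.tc_array_len = group_size;

The read side of tasklist_lock previously covered this same region; after the change, the walk relies on the RCU read-side critical section to keep the task_struct objects from being freed while the array is built.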