@@ -2038,7 +2038,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
 		/* @tsk either already exited or can't exit until the end */
 		if (tsk->flags & PF_EXITING)
-			continue;
+			goto next;
 
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
@@ -2046,7 +2046,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		ent.cgrp = task_cgroup_from_root(tsk, root);
 		/* nothing to do if this task is already in the cgroup */
 		if (ent.cgrp == cgrp)
-			continue;
+			goto next;
 		/*
 		 * saying GFP_ATOMIC has no effect here because we did prealloc
 		 * earlier, but it's good form to communicate our expectations.
@@ -2054,7 +2054,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
-
+	next:
 		if (!threadgroup)
 			break;
 	} while_each_thread(leader, tsk);
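
For reference, why `continue` is wrong here: in a do-while loop, `continue`
jumps straight to the controlling expression, and `while_each_thread(g, t)`
expands to roughly `while ((t = next_thread(t)) != g)`. So on the PF_EXITING
and already-in-cgroup paths, the `if (!threadgroup) break;` at the bottom of
the loop body was skipped and the walk advanced to the next thread even for a
single-task attach, which could attach a thread other than the one requested.
The `goto next;` keeps the break check on every path. Below is a minimal
userspace sketch of the same control flow, not kernel code; the thread array
and names are invented for illustration, and only `threadgroup` and the
trailing break check mirror the function above:

	#include <stdio.h>

	int main(void)
	{
		int threads[] = { 1, 2, 3, 4 };	/* stand-in thread group */
		int nthreads = 4;
		int threadgroup = 0;		/* attaching a single task */
		int i = 0;

		do {
			if (threads[i] == 1) {
				/* like "continue" before the fix: jumps to
				 * the loop condition, so the break check
				 * below is never reached for this task */
				continue;
			}
			printf("would attach thread %d\n", threads[i]);

			if (!threadgroup)
				break;	/* skipped whenever "continue" fires */
		} while (++i < nthreads);

		return 0;
	}

Run as-is, this prints "would attach thread 2": the skip of thread 1 carries
the walk on to thread 2 instead of terminating, the same misbehavior the
`goto next;` label eliminates in cgroup_attach_task().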