@@ -1944,13 +1944,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 				goto out;
 			}
 		}
-		if (ss->can_attach_task) {
-			retval = ss->can_attach_task(cgrp, tsk);
-			if (retval) {
-				failed_ss = ss;
-				goto out;
-			}
-		}
 	}
 
 	retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
@@ -1958,10 +1951,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 		goto out;
 
 	for_each_subsys(root, ss) {
-		if (ss->pre_attach)
-			ss->pre_attach(cgrp);
-		if (ss->attach_task)
-			ss->attach_task(cgrp, tsk);
 		if (ss->attach)
 			ss->attach(ss, cgrp, &tset);
 	}
@@ -2093,7 +2082,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
 	int retval, i, group_size, nr_migrating_tasks;
 	struct cgroup_subsys *ss, *failed_ss = NULL;
-	bool cancel_failed_ss = false;
 	/* guaranteed to be initialized later, but the compiler needs this */
 	struct css_set *oldcg;
 	struct cgroupfs_root *root = cgrp->root;
@@ -2188,21 +2176,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 				goto out_cancel_attach;
 			}
 		}
-		/* a callback to be run on every thread in the threadgroup. */
-		if (ss->can_attach_task) {
-			/* run on each task in the threadgroup. */
-			for (i = 0; i < group_size; i++) {
-				tc = flex_array_get(group, i);
-				if (tc->cgrp == cgrp)
-					continue;
-				retval = ss->can_attach_task(cgrp, tc->task);
-				if (retval) {
-					failed_ss = ss;
-					cancel_failed_ss = true;
-					goto out_cancel_attach;
-				}
-			}
-		}
 	}
 
 	/*
@@ -2234,15 +2207,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	}
 
 	/*
-	 * step 3: now that we're guaranteed success wrt the css_sets, proceed
-	 * to move all tasks to the new cgroup, calling ss->attach_task for each
-	 * one along the way. there are no failure cases after here, so this is
-	 * the commit point.
+	 * step 3: now that we're guaranteed success wrt the css_sets,
+	 * proceed to move all tasks to the new cgroup. There are no
+	 * failure cases after here, so this is the commit point.
 	 */
-	for_each_subsys(root, ss) {
-		if (ss->pre_attach)
-			ss->pre_attach(cgrp);
-	}
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
 		/* leave current thread as it is if it's already there */
@@ -2250,18 +2218,11 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			continue;
 		retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
 		BUG_ON(retval);
-		/* attach each task to each subsystem */
-		for_each_subsys(root, ss) {
-			if (ss->attach_task)
-				ss->attach_task(cgrp, tc->task);
-		}
 	}
 	/* nothing is sensitive to fork() after this point. */
 
 	/*
-	 * step 4: do expensive, non-thread-specific subsystem callbacks.
-	 * TODO: if ever a subsystem needs to know the oldcgrp for each task
-	 * being moved, this call will need to be reworked to communicate that.
+	 * step 4: do subsystem attach callbacks.
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->attach)
@@ -2285,11 +2246,8 @@ out_cancel_attach:
 	/* same deal as in cgroup_attach_task */
 	if (retval) {
 		for_each_subsys(root, ss) {
-			if (ss == failed_ss) {
-				if (cancel_failed_ss && ss->cancel_attach)
-					ss->cancel_attach(ss, cgrp, &tset);
+			if (ss == failed_ss)
 				break;
-			}
 			if (ss->cancel_attach)
 				ss->cancel_attach(ss, cgrp, &tset);
 		}