@@ -57,6 +57,7 @@
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 #include <linux/eventfd.h>
 #include <linux/poll.h>
+#include <linux/flex_array.h> /* used in cgroup_attach_proc */
 
 #include <asm/atomic.h>
 
@@ -1995,7 +1996,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	struct cgroupfs_root *root = cgrp->root;
 	/* threadgroup list cursor and array */
 	struct task_struct *tsk;
-	struct task_struct **group;
+	struct flex_array *group;
 	/*
 	 * we need to make sure we have css_sets for all the tasks we're
 	 * going to move -before- we actually start moving them, so that in
@@ -2012,9 +2013,15 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * and if threads exit, this will just be an over-estimate.
 	 */
 	group_size = get_nr_threads(leader);
-	group = kmalloc(group_size * sizeof(*group), GFP_KERNEL);
+	/* flex_array supports very large thread-groups better than kmalloc. */
+	group = flex_array_alloc(sizeof(struct task_struct *), group_size,
+				 GFP_KERNEL);
 	if (!group)
 		return -ENOMEM;
+	/* pre-allocate to guarantee space while iterating in rcu read-side. */
+	retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
+	if (retval)
+		goto out_free_group_list;
 
 	/* prevent changes to the threadgroup list while we take a snapshot. */
 	rcu_read_lock();
@@ -2037,7 +2044,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
 		get_task_struct(tsk);
-		group[i] = tsk;
+		/*
+		 * saying GFP_ATOMIC has no effect here because we did prealloc
+		 * earlier, but it's good form to communicate our expectations.
+		 */
+		retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+		BUG_ON(retval != 0);
 		i++;
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
@@ -2059,7 +2071,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		if (ss->can_attach_task) {
 			/* run on each task in the threadgroup. */
 			for (i = 0; i < group_size; i++) {
-				retval = ss->can_attach_task(cgrp, group[i]);
+				tsk = flex_array_get_ptr(group, i);
+				retval = ss->can_attach_task(cgrp, tsk);
 				if (retval) {
 					failed_ss = ss;
 					cancel_failed_ss = true;
@@ -2075,7 +2088,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	INIT_LIST_HEAD(&newcg_list);
 	for (i = 0; i < group_size; i++) {
-		tsk = group[i];
+		tsk = flex_array_get_ptr(group, i);
 		/* nothing to do if this task is already in the cgroup */
 		oldcgrp = task_cgroup_from_root(tsk, root);
 		if (cgrp == oldcgrp)
@@ -2114,7 +2127,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			ss->pre_attach(cgrp);
 	}
 	for (i = 0; i < group_size; i++) {
-		tsk = group[i];
+		tsk = flex_array_get_ptr(group, i);
 		/* leave current thread as it is if it's already there */
 		oldcgrp = task_cgroup_from_root(tsk, root);
 		if (cgrp == oldcgrp)
@@ -2167,10 +2180,12 @@ out_cancel_attach:
 		}
 	}
 	/* clean up the array of referenced threads in the group. */
-	for (i = 0; i < group_size; i++)
-		put_task_struct(group[i]);
+	for (i = 0; i < group_size; i++) {
+		tsk = flex_array_get_ptr(group, i);
+		put_task_struct(tsk);
+	}
 out_free_group_list:
-	kfree(group);
+	flex_array_free(group);
 	return retval;
 }
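
For reference only (not part of the patch): a minimal sketch of the flex_array lifecycle the hunks above rely on, using a hypothetical helper flex_array_pattern() and element count nr (assumed > 0). The point of flex_array here is that it never needs a high-order allocation for a large pointer array, and preallocating every part while GFP_KERNEL allocation is still allowed is what lets the later GFP_ATOMIC put never fail.

#include <linux/errno.h>
#include <linux/flex_array.h>
#include <linux/gfp.h>
#include <linux/sched.h>

/* Illustrative only: same alloc/prealloc/put/get/free pattern as the patch. */
static int flex_array_pattern(unsigned int nr)
{
	struct flex_array *fa;
	struct task_struct *tsk;
	unsigned int i;
	int err;

	/* one base structure plus a page per part; no high-order allocation */
	fa = flex_array_alloc(sizeof(struct task_struct *), nr, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;
	/*
	 * Pre-allocate all parts while sleeping is still allowed.  The third
	 * argument is the last element index, matching the patch's usage.
	 */
	err = flex_array_prealloc(fa, 0, nr - 1, GFP_KERNEL);
	if (err)
		goto out_free;
	tsk = current;
	for (i = 0; i < nr; i++) {
		/* cannot fail after prealloc; GFP_ATOMIC just documents intent */
		err = flex_array_put_ptr(fa, i, tsk, GFP_ATOMIC);
		BUG_ON(err);
	}
	for (i = 0; i < nr; i++) {
		tsk = flex_array_get_ptr(fa, i);
		WARN_ON(tsk != current);
	}
out_free:
	flex_array_free(fa);
	return err;
}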