@@ -1057,13 +1057,15 @@ static void *cpuset_being_rebound;
  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
  * @oldmem: old mems_allowed of cpuset cs
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
  * Called with cgroup_mutex held
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
  */
-static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
+static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
+				  struct ptr_heap *heap)
 {
-	int retval;
 	struct cgroup_scanner scan;
 
 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
@@ -1071,7 +1073,7 @@ static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
 	scan.cg = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_nodemask;
-	scan.heap = NULL;
+	scan.heap = heap;
 	scan.data = (nodemask_t *)oldmem;
 
 	/*
@@ -1084,12 +1086,10 @@ static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 	 * is idempotent. Also migrate pages in each mm to new nodes.
 	 */
-	retval = cgroup_scan_tasks(&scan);
+	cgroup_scan_tasks(&scan);
 
 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
 	cpuset_being_rebound = NULL;
-
-	return retval;
 }
 
 /*
@@ -1110,6 +1110,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 {
 	nodemask_t oldmem;
 	int retval;
+	struct ptr_heap heap;
 
 	/*
 	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
@@ -1144,12 +1145,18 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		goto done;
 
+	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+	if (retval < 0)
+		goto done;
+
 	mutex_lock(&callback_mutex);
 	cs->mems_allowed = trialcs->mems_allowed;
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	retval = update_tasks_nodemask(cs, &oldmem);
+	update_tasks_nodemask(cs, &oldmem, &heap);
+
+	heap_free(&heap);
 done:
 	return retval;
 }
@@ -2003,7 +2010,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 			remove_tasks_in_empty_cpuset(cp);
 		else {
 			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, &oldmems);
+			update_tasks_nodemask(cp, &oldmems, NULL);
 		}
 	}
 }
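
For reference, here is a minimal sketch (not part of the patch) of the calling convention the change introduces: a caller that must not fail once the new mems_allowed has been committed pre-allocates the scan heap with heap_init() up front, so the later update_tasks_nodemask() call cannot return an error, while a caller with no such requirement (like scan_for_empty_cpusets()) passes NULL and lets cgroup_scan_tasks() allocate internally. The helper name example_change_mems() is hypothetical; the flow is condensed from update_nodemask() above.

static int example_change_mems(struct cpuset *cs, const nodemask_t *oldmem)
{
	struct ptr_heap heap;
	int retval;

	/*
	 * Allocate the scan heap before committing any cpuset state; this
	 * is the only step in the sequence that can fail.
	 */
	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval < 0)
		return retval;

	/* ... commit the new mems_allowed under callback_mutex ... */

	/*
	 * With @heap != NULL, cgroup_scan_tasks() is guaranteed to return 0,
	 * which is why update_tasks_nodemask() can now return void.
	 */
	update_tasks_nodemask(cs, oldmem, &heap);

	heap_free(&heap);
	return 0;
}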