@@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  *    call to guarantee_online_mems(), as we know no one is changing
  *    our task's cpuset.
  *
- *    Hold callback_mutex around the two modifications of our tasks
- *    mems_allowed to synchronize with cpuset_mems_allowed().
- *
  *    While the mm_struct we are migrating is typically from some
  *    other task, the task_struct mems_allowed that we are hacking
  *    is for our current task, which must allocate new pages for that
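
The dropped paragraph documented locking that no longer exists: a task's
mems_allowed is now written only by the task itself, so there is no need to
hold callback_mutex around the two stores. Those two stores are the ones in
cpuset_migrate_mm(), whose body in this era reads roughly as below (a sketch
reconstructed from the surrounding kernel source, shown for context only; it
is not part of the patch):

	static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
	{
		struct task_struct *tsk = current;

		/* First store: let migration allocate on the target nodes. */
		tsk->mems_allowed = *to;

		do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

		/* Second store: back to an online subset of our cpuset's mems. */
		guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
	}
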
@@ -1391,11 +1388,10 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 
 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
-		to = node_possible_map;
 	} else {
 		guarantee_online_cpus(cs, cpus_attach);
-		guarantee_online_mems(cs, &to);
 	}
+	guarantee_online_mems(cs, &to);
 
 	/* do per-task migration stuff possibly for each in the threadgroup */
 	cpuset_attach_task(tsk, &to, cs);
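
Previously, tasks attached to top_cpuset were handed node_possible_map, which
can include offline or memoryless nodes. Hoisting guarantee_online_mems() out
of the else branch clamps `to` to nodes that actually have memory for every
cpuset, the top one included. For reference, guarantee_online_mems() in this
era looks roughly like the sketch below (for context only, not part of the
patch):

	static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
	{
		/* Climb the hierarchy until an ancestor has a node with memory. */
		while (cs && !nodes_intersects(cs->mems_allowed,
						node_states[N_HIGH_MEMORY]))
			cs = cs->parent;
		if (cs)
			nodes_and(*pmask, cs->mems_allowed,
					node_states[N_HIGH_MEMORY]);
		else
			*pmask = node_states[N_HIGH_MEMORY];
	}
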
@@ -2090,15 +2086,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 static int cpuset_track_online_nodes(struct notifier_block *self,
 				unsigned long action, void *arg)
 {
+	nodemask_t oldmems;
+
 	cgroup_lock();
 	switch (action) {
 	case MEM_ONLINE:
-	case MEM_OFFLINE:
+		oldmems = top_cpuset.mems_allowed;
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 		mutex_unlock(&callback_mutex);
-		if (action == MEM_OFFLINE)
-			scan_for_empty_cpusets(&top_cpuset);
+		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
+		break;
+	case MEM_OFFLINE:
+		/*
+		 * needn't update top_cpuset.mems_allowed explicitly because
+		 * scan_for_empty_cpusets() will update it.
+		 */
+		scan_for_empty_cpusets(&top_cpuset);
+		break;
 	default:
 		break;
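
MEM_ONLINE and MEM_OFFLINE now take different paths: online events snapshot
the old top-level mems, refresh top_cpuset.mems_allowed under callback_mutex,
and propagate the change to tasks via update_tasks_nodemask() (the third
argument appears to be an optional ptr_heap for batching the task scan; NULL
lets it fall back internally). Offline events defer to
scan_for_empty_cpusets(), which already rewrites mems_allowed while
evacuating cpusets that lost all their nodes. The notifier itself is wired up
at boot; trimmed to the relevant lines, cpuset_init_smp() in this era looks
roughly like this (for context only, not part of the patch):

	void __init cpuset_init_smp(void)
	{
		cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];

		/* Watch for node online/offline events. */
		hotplug_memory_notifier(cpuset_track_online_nodes, 10);
	}
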