@@ -6122,6 +6122,17 @@ static void sched_init_numa(void)
	 * numbers.
	 */
 
+	/*
+	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
+	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
+	 * the array will contain less than 'level' members. This could be
+	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
+	 * in other functions.
+	 *
+	 * We reset it to 'level' at the end of this function.
+	 */
+	sched_domains_numa_levels = 0;
+
 	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
 	if (!sched_domains_numa_masks)
 		return;
@@ -6176,11 +6187,68 @@ static void sched_init_numa(void)
 	}
 
 	sched_domain_topology = tl;
+
+	sched_domains_numa_levels = level;
+}
+
+static void sched_domains_numa_masks_set(int cpu)
+{
+	int i, j;
+	int node = cpu_to_node(cpu);
+
+	for (i = 0; i < sched_domains_numa_levels; i++) {
+		for (j = 0; j < nr_node_ids; j++) {
+			if (node_distance(j, node) <= sched_domains_numa_distance[i])
+				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
+		}
+	}
+}
+
+static void sched_domains_numa_masks_clear(int cpu)
+{
+	int i, j;
+	for (i = 0; i < sched_domains_numa_levels; i++) {
+		for (j = 0; j < nr_node_ids; j++)
+			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
+	}
+}
+
+/*
+ * Update sched_domains_numa_masks[level][node] array when new cpus
+ * are onlined.
+ */
+static int sched_domains_numa_masks_update(struct notifier_block *nfb,
+					   unsigned long action,
+					   void *hcpu)
+{
+	int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		sched_domains_numa_masks_set(cpu);
+		break;
+
+	case CPU_DEAD:
+		sched_domains_numa_masks_clear(cpu);
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
 }
 #else
 static inline void sched_init_numa(void)
 {
 }
+
+static int sched_domains_numa_masks_update(struct notifier_block *nfb,
+					   unsigned long action,
+					   void *hcpu)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
@@ -6629,6 +6697,7 @@ void __init sched_init_smp(void)
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
+	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
 	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
 	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);