|
@@ -6917,19 +6917,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 }
 
 /* cpus with isolated domains */
-static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_var_t cpu_isolated_map;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
-	static int __initdata ints[NR_CPUS];
-	int i;
-
-	str = get_options(str, ARRAY_SIZE(ints), ints);
-	cpus_clear(cpu_isolated_map);
-	for (i = 1; i <= ints[0]; i++)
-		if (ints[i] < NR_CPUS)
-			cpu_set(ints[i], cpu_isolated_map);
+	cpulist_parse(str, *cpu_isolated_map);
 	return 1;
 }
 
@@ -7727,7 +7720,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
 		doms_cur = &fallback_doms;
-	cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
 	err = build_sched_domains(doms_cur);
 	register_sched_domain_sysctl();
@@ -7826,7 +7819,7 @@ match1:
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
-		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
@@ -7985,7 +7978,9 @@ static int update_runtime(struct notifier_block *nfb,
 
 void __init sched_init_smp(void)
 {
-	cpumask_t non_isolated_cpus;
+	cpumask_var_t non_isolated_cpus;
+
+	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 
 #if defined(CONFIG_NUMA)
 	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -7994,10 +7989,10 @@ void __init sched_init_smp(void)
 #endif
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
-	arch_init_sched_domains(&cpu_online_map);
-	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
-	if (cpus_empty(non_isolated_cpus))
-		cpu_set(smp_processor_id(), non_isolated_cpus);
+	arch_init_sched_domains(cpu_online_mask);
+	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+	if (cpumask_empty(non_isolated_cpus))
+		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
@@ -8012,9 +8007,10 @@ void __init sched_init_smp(void)
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
-	if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
+	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
 		BUG();
 	sched_init_granularity();
+	free_cpumask_var(non_isolated_cpus);
 }
 #else
 void __init sched_init_smp(void)
@@ -8334,6 +8330,7 @@ void __init sched_init(void)
 #ifdef CONFIG_NO_HZ
 	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
 #endif
+	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 
 	scheduler_running = 1;
 }