@@ -59,8 +59,8 @@
 struct thread_info *secondary_ti;
 
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	smp_store_cpu_info(boot_cpuid);
 	cpu_callin_map[boot_cpuid] = 1;
 
+	for_each_possible_cpu(cpu) {
+		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
+					GFP_KERNEL, cpu_to_node(cpu));
+		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+					GFP_KERNEL, cpu_to_node(cpu));
+	}
+
+	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
 	if (smp_ops)
 		if (smp_ops->probe)
 			max_cpus = smp_ops->probe();
@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 void __devinit smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
-
-	set_cpu_online(boot_cpuid, true);
-	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
-	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
@@ -525,15 +531,15 @@ int __devinit start_secondary(void *unused)
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
-		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
-		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
+		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
 
 		/* cpu_core_map should be a superset of
 		 * cpu_sibling_map even if we don't have cache
 		 * information, so update the former here, too.
 		 */
-		cpu_set(cpu, per_cpu(cpu_core_map, base +i));
-		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
+		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
 	}
 	l2_cache = cpu_to_l2cache(cpu);
 	for_each_online_cpu(i) {
@@ -541,8 +547,8 @@ int __devinit start_secondary(void *unused)
 		if (!np)
 			continue;
 		if (np == l2_cache) {
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpumask_set_cpu(cpu, cpu_core_mask(i));
+			cpumask_set_cpu(i, cpu_core_mask(cpu));
 		}
 		of_node_put(np);
 	}
@@ -602,10 +608,10 @@ int __cpu_disable(void)
 	/* Update sibling maps */
 	base = cpu_first_thread_in_core(cpu);
 	for (i = 0; i < threads_per_core; i++) {
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
-		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
-		cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
-		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
+		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
+		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
+		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
 	}
 
 	l2_cache = cpu_to_l2cache(cpu);
@@ -614,8 +620,8 @@ int __cpu_disable(void)
 		if (!np)
 			continue;
 		if (np == l2_cache) {
-			cpu_clear(cpu, per_cpu(cpu_core_map, i));
-			cpu_clear(i, per_cpu(cpu_core_map, cpu));
+			cpumask_clear_cpu(cpu, cpu_core_mask(i));
+			cpumask_clear_cpu(i, cpu_core_mask(cpu));
 		}
 		of_node_put(np);
 	}
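
For reference, a minimal self-contained sketch (not part of the patch) of the cpumask_var_t lifecycle the conversion above relies on. zalloc_cpumask_var_node(), cpumask_set_cpu() and free_cpumask_var() are the kernel APIs the patch uses or implies; demo_mask, demo_alloc_masks() and demo_free_masks() are hypothetical names used only for illustration:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/topology.h>

/* Hypothetical per-cpu mask, mirroring the patch's cpu_sibling_map. */
static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

static int demo_alloc_masks(void)
{
	unsigned int cpu;

	/*
	 * With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer
	 * and must be allocated before use; the zalloc_ variant also
	 * zeroes the mask, taking over the job of the old static
	 * CPU_MASK_NONE initializer. Without OFFSTACK it is a fixed
	 * array and the allocation is a no-op that always succeeds.
	 */
	for_each_possible_cpu(cpu) {
		if (!zalloc_cpumask_var_node(&per_cpu(demo_mask, cpu),
					     GFP_KERNEL, cpu_to_node(cpu)))
			return -ENOMEM;
		/* cpumask_set_cpu() replaces cpu_set(): each CPU is
		 * trivially a sibling of itself, as the patch does for
		 * boot_cpuid in smp_prepare_cpus(). */
		cpumask_set_cpu(cpu, per_cpu(demo_mask, cpu));
	}
	return 0;
}

static void demo_free_masks(void)
{
	unsigned int cpu;

	/* free_cpumask_var() is likewise a no-op without OFFSTACK. */
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(demo_mask, cpu));
}

Note that the patch itself does not check the return value of zalloc_cpumask_var_node(); a GFP_KERNEL allocation failure that early in boot would leave the machine unusable regardless.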