|
@@ -70,8 +70,8 @@ EXPORT_SYMBOL(smp_num_siblings);
|
|
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
|
|
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
|
|
|
|
|
|
/* representing HT siblings of each logical CPU */
|
|
/* representing HT siblings of each logical CPU */
|
|
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
|
|
|
|
-EXPORT_SYMBOL(cpu_sibling_map);
|
|
|
|
|
|
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
|
|
|
|
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
|
|
|
|
|
|
/* representing HT and core siblings of each logical CPU */
|
|
/* representing HT and core siblings of each logical CPU */
|
|
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
|
|
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
|
|
@@ -319,8 +319,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
|
for_each_cpu_mask(i, cpu_sibling_setup_map) {
|
|
for_each_cpu_mask(i, cpu_sibling_setup_map) {
|
|
if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
|
|
if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
|
|
c[cpu].cpu_core_id == c[i].cpu_core_id) {
|
|
c[cpu].cpu_core_id == c[i].cpu_core_id) {
|
|
- cpu_set(i, cpu_sibling_map[cpu]);
|
|
|
|
- cpu_set(cpu, cpu_sibling_map[i]);
|
|
|
|
|
|
+ cpu_set(i, per_cpu(cpu_sibling_map, cpu));
|
|
|
|
+ cpu_set(cpu, per_cpu(cpu_sibling_map, i));
|
|
cpu_set(i, per_cpu(cpu_core_map, cpu));
|
|
cpu_set(i, per_cpu(cpu_core_map, cpu));
|
|
cpu_set(cpu, per_cpu(cpu_core_map, i));
|
|
cpu_set(cpu, per_cpu(cpu_core_map, i));
|
|
cpu_set(i, c[cpu].llc_shared_map);
|
|
cpu_set(i, c[cpu].llc_shared_map);
|
|
@@ -328,13 +328,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
} else {
|
|
} else {
|
|
- cpu_set(cpu, cpu_sibling_map[cpu]);
|
|
|
|
|
|
+ cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
|
|
}
|
|
}
|
|
|
|
|
|
cpu_set(cpu, c[cpu].llc_shared_map);
|
|
cpu_set(cpu, c[cpu].llc_shared_map);
|
|
|
|
|
|
if (current_cpu_data.x86_max_cores == 1) {
|
|
if (current_cpu_data.x86_max_cores == 1) {
|
|
- per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
|
|
|
|
|
|
+ per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
|
|
c[cpu].booted_cores = 1;
|
|
c[cpu].booted_cores = 1;
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
@@ -351,12 +351,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
|
/*
|
|
/*
|
|
* Does this new cpu bringup a new core?
|
|
* Does this new cpu bringup a new core?
|
|
*/
|
|
*/
|
|
- if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
|
|
|
|
|
|
+ if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
|
|
/*
|
|
/*
|
|
* for each core in package, increment
|
|
* for each core in package, increment
|
|
* the booted_cores for this new cpu
|
|
* the booted_cores for this new cpu
|
|
*/
|
|
*/
|
|
- if (first_cpu(cpu_sibling_map[i]) == i)
|
|
|
|
|
|
+ if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
|
|
c[cpu].booted_cores++;
|
|
c[cpu].booted_cores++;
|
|
/*
|
|
/*
|
|
* increment the core count for all
|
|
* increment the core count for all
|
|
@@ -983,7 +983,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
|
|
printk(KERN_NOTICE "Local APIC not detected."
|
|
printk(KERN_NOTICE "Local APIC not detected."
|
|
" Using dummy APIC emulation.\n");
|
|
" Using dummy APIC emulation.\n");
|
|
map_cpu_to_logical_apicid();
|
|
map_cpu_to_logical_apicid();
|
|
- cpu_set(0, cpu_sibling_map[0]);
|
|
|
|
|
|
+ cpu_set(0, per_cpu(cpu_sibling_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
@@ -1008,7 +1008,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
|
|
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
|
|
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
|
|
smpboot_clear_io_apic_irqs();
|
|
smpboot_clear_io_apic_irqs();
|
|
phys_cpu_present_map = physid_mask_of_physid(0);
|
|
phys_cpu_present_map = physid_mask_of_physid(0);
|
|
- cpu_set(0, cpu_sibling_map[0]);
|
|
|
|
|
|
+ cpu_set(0, per_cpu(cpu_sibling_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
@@ -1023,7 +1023,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
|
|
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
|
|
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
|
|
smpboot_clear_io_apic_irqs();
|
|
smpboot_clear_io_apic_irqs();
|
|
phys_cpu_present_map = physid_mask_of_physid(0);
|
|
phys_cpu_present_map = physid_mask_of_physid(0);
|
|
- cpu_set(0, cpu_sibling_map[0]);
|
|
|
|
|
|
+ cpu_set(0, per_cpu(cpu_sibling_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
@@ -1102,15 +1102,15 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
|
|
Dprintk("Boot done.\n");
|
|
Dprintk("Boot done.\n");
|
|
|
|
|
|
/*
|
|
/*
|
|
- * construct cpu_sibling_map[], so that we can tell sibling CPUs
|
|
|
|
|
|
+ * construct cpu_sibling_map, so that we can tell sibling CPUs
|
|
* efficiently.
|
|
* efficiently.
|
|
*/
|
|
*/
|
|
for (cpu = 0; cpu < NR_CPUS; cpu++) {
|
|
for (cpu = 0; cpu < NR_CPUS; cpu++) {
|
|
- cpus_clear(cpu_sibling_map[cpu]);
|
|
|
|
|
|
+ cpus_clear(per_cpu(cpu_sibling_map, cpu));
|
|
cpus_clear(per_cpu(cpu_core_map, cpu));
|
|
cpus_clear(per_cpu(cpu_core_map, cpu));
|
|
}
|
|
}
|
|
|
|
|
|
- cpu_set(0, cpu_sibling_map[0]);
|
|
|
|
|
|
+ cpu_set(0, per_cpu(cpu_sibling_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
cpu_set(0, per_cpu(cpu_core_map, 0));
|
|
|
|
|
|
smpboot_setup_io_apic();
|
|
smpboot_setup_io_apic();
|
|
@@ -1153,13 +1153,13 @@ void remove_siblinginfo(int cpu)
|
|
/*
|
|
/*
|
|
* last thread sibling in this cpu core going down
|
|
* last thread sibling in this cpu core going down
|
|
*/
|
|
*/
|
|
- if (cpus_weight(cpu_sibling_map[cpu]) == 1)
|
|
|
|
|
|
+ if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
|
|
c[sibling].booted_cores--;
|
|
c[sibling].booted_cores--;
|
|
}
|
|
}
|
|
|
|
|
|
- for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
|
|
|
|
- cpu_clear(cpu, cpu_sibling_map[sibling]);
|
|
|
|
- cpus_clear(cpu_sibling_map[cpu]);
|
|
|
|
|
|
+ for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
|
|
|
|
+ cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
|
|
|
|
+ cpus_clear(per_cpu(cpu_sibling_map, cpu));
|
|
cpus_clear(per_cpu(cpu_core_map, cpu));
|
|
cpus_clear(per_cpu(cpu_core_map, cpu));
|
|
c[cpu].phys_proc_id = 0;
|
|
c[cpu].phys_proc_id = 0;
|
|
c[cpu].cpu_core_id = 0;
|
|
c[cpu].cpu_core_id = 0;
|