@@ -64,6 +64,7 @@
 int smp_num_siblings = 1;
 /* Package ID of each logical CPU */
 u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
+/* core ID of each logical CPU */
 u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
 EXPORT_SYMBOL(phys_proc_id);
 EXPORT_SYMBOL(cpu_core_id);
@@ -89,7 +90,10 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 /* Set when the idlers are all forked */
 int smp_threads_ready;
 
+/* representing HT siblings of each logical CPU */
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+
+/* representing HT and core siblings of each logical CPU */
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
@@ -436,30 +440,59 @@ void __cpuinit smp_callin(void)
 	cpu_set(cpuid, cpu_callin_map);
 }
 
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
+	struct cpuinfo_x86 *c = cpu_data;
+
+	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu(i) {
-			if (cpu_core_id[cpu] == cpu_core_id[i]) {
+		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+			if (phys_proc_id[cpu] == phys_proc_id[i] &&
+			    cpu_core_id[cpu] == cpu_core_id[i]) {
 				cpu_set(i, cpu_sibling_map[cpu]);
 				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, cpu_core_map[cpu]);
+				cpu_set(cpu, cpu_core_map[i]);
 			}
 		}
 	} else {
 		cpu_set(cpu, cpu_sibling_map[cpu]);
 	}
 
-	if (current_cpu_data.x86_num_cores > 1) {
-		for_each_cpu(i) {
-			if (phys_proc_id[cpu] == phys_proc_id[i]) {
-				cpu_set(i, cpu_core_map[cpu]);
-				cpu_set(cpu, cpu_core_map[i]);
-			}
-		}
-	} else {
+	if (current_cpu_data.x86_max_cores == 1) {
 		cpu_core_map[cpu] = cpu_sibling_map[cpu];
+		c[cpu].booted_cores = 1;
+		return;
+	}
+
+	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		if (phys_proc_id[cpu] == phys_proc_id[i]) {
+			cpu_set(i, cpu_core_map[cpu]);
+			cpu_set(cpu, cpu_core_map[i]);
+			/*
+			 * Does this new cpu bringup a new core?
+			 */
+			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+				/*
+				 * for each core in package, increment
+				 * the booted_cores for this new cpu
+				 */
+				if (first_cpu(cpu_sibling_map[i]) == i)
+					c[cpu].booted_cores++;
+				/*
+				 * increment the core count for all
+				 * the other cpus in this package
+				 */
+				if (i != cpu)
+					c[i].booted_cores++;
+			} else if (i != cpu && !c[cpu].booted_cores)
+				c[cpu].booted_cores = c[i].booted_cores;
+		}
 	}
 }
 
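For illustration, here is a minimal userspace sketch (not kernel code) of the booted_cores accounting introduced above. It replays set_cpu_sibling_map() over a hypothetical 2-package, 2-core, 2-thread box, with plain bitmasks standing in for cpumask_t and __builtin_popcount()/__builtin_ctz() standing in for cpus_weight()/first_cpu(). Bringing the 8 logical cpus up in order, every cpu should end with booted_cores == 2:

#include <stdio.h>

#define NCPUS 8

/* toy topology: package 0 = cpus 0-3, package 1 = cpus 4-7 */
static int phys_proc_id[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
static int cpu_core_id[NCPUS]  = { 0, 0, 1, 1, 0, 0, 1, 1 };
static unsigned sibling_map[NCPUS], core_map[NCPUS], setup_map;
static int booted_cores[NCPUS];

static int weight(unsigned m) { return __builtin_popcount(m); }
static int first(unsigned m)  { return __builtin_ctz(m); }

static void sim_set_cpu_sibling_map(int cpu)
{
	int i;

	setup_map |= 1u << cpu;

	/* pass 1: thread siblings share both the package and core id */
	for (i = 0; i < NCPUS; i++) {
		if (!(setup_map & (1u << i)))
			continue;
		if (phys_proc_id[cpu] == phys_proc_id[i] &&
		    cpu_core_id[cpu] == cpu_core_id[i]) {
			sibling_map[cpu] |= 1u << i;
			sibling_map[i]   |= 1u << cpu;
		}
	}

	/* pass 2: core siblings share only the package id */
	for (i = 0; i < NCPUS; i++) {
		if (!(setup_map & (1u << i)) ||
		    phys_proc_id[cpu] != phys_proc_id[i])
			continue;
		core_map[cpu] |= 1u << i;
		core_map[i]   |= 1u << cpu;
		if (weight(sibling_map[cpu]) == 1) {
			/* cpu is the first booted thread of its core */
			if (first(sibling_map[i]) == i)
				booted_cores[cpu]++;
			if (i != cpu)
				booted_cores[i]++;
		} else if (i != cpu && !booted_cores[cpu])
			booted_cores[cpu] = booted_cores[i];
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		sim_set_cpu_sibling_map(cpu);
	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d: booted_cores=%d\n", cpu, booted_cores[cpu]);
	return 0;
}

Note how the second thread of an already-counted core falls into the else-if branch (its sibling map already has weight 2) and simply inherits the count from an earlier cpu in the package.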
@@ -993,6 +1026,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	nmi_watchdog_default();
 	current_cpu_data = boot_cpu_data;
 	current_thread_info()->cpu = 0; /* needed? */
+	set_cpu_sibling_map(0);
 
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
@@ -1036,8 +1070,6 @@ void __init smp_prepare_boot_cpu(void)
 	int me = smp_processor_id();
 	cpu_set(me, cpu_online_map);
 	cpu_set(me, cpu_callout_map);
-	cpu_set(0, cpu_sibling_map[0]);
-	cpu_set(0, cpu_core_map[0]);
 	per_cpu(cpu_state, me) = CPU_ONLINE;
 }
 
@@ -1106,15 +1138,24 @@ void __init smp_cpus_done(unsigned int max_cpus)
 static void remove_siblinginfo(int cpu)
 {
 	int sibling;
+	struct cpuinfo_x86 *c = cpu_data;
 
+	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
+		cpu_clear(cpu, cpu_core_map[sibling]);
+		/*
+		 * last thread sibling in this cpu core going down
+		 */
+		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+			c[sibling].booted_cores--;
+	}
+
 	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
 		cpu_clear(cpu, cpu_sibling_map[sibling]);
-	for_each_cpu_mask(sibling, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[sibling]);
 	cpus_clear(cpu_sibling_map[cpu]);
 	cpus_clear(cpu_core_map[cpu]);
 	phys_proc_id[cpu] = BAD_APICID;
 	cpu_core_id[cpu] = BAD_APICID;
+	cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
 void remove_cpu_from_maps(void)
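A companion sketch for the teardown side, reusing the globals from the toy simulation above (a fragment, not a standalone program; the function name is hypothetical). It shows why the patch walks cpu_core_map[cpu] before the sibling map is torn down: the booted_cores decrement keys off cpus_weight(cpu_sibling_map[cpu]) == 1, i.e. the cpu going down is the last remaining thread of its core.

static void sim_remove_siblinginfo(int cpu)
{
	int sibling;

	/* package-wide pass first: sibling_map[cpu] must still be intact */
	for (sibling = 0; sibling < NCPUS; sibling++) {
		if (!(core_map[cpu] & (1u << sibling)))
			continue;
		core_map[sibling] &= ~(1u << cpu);
		/* last thread sibling in this core going down? */
		if (weight(sibling_map[cpu]) == 1)
			booted_cores[sibling]--;
	}
	for (sibling = 0; sibling < NCPUS; sibling++)
		if (sibling_map[cpu] & (1u << sibling))
			sibling_map[sibling] &= ~(1u << cpu);
	sibling_map[cpu] = 0;
	core_map[cpu] = 0;
	phys_proc_id[cpu] = cpu_core_id[cpu] = -1;	/* stand-in for BAD_APICID */
	setup_map &= ~(1u << cpu);
}

Taking cpu 7 offline first leaves all counts untouched (its core still has cpu 6 booted); taking cpu 6 offline afterwards drops booted_cores by one on every remaining cpu of package 1.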