@@ -101,11 +101,11 @@ EXPORT_SYMBOL(smp_num_siblings);
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* Per CPU bogomips and other parameters */
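
With CONFIG_CPUMASK_OFFSTACK=y, a cpumask_var_t is a pointer whose storage must be allocated with alloc_cpumask_var() before use; without it, the type degrades to a one-element array of struct cpumask and the old in-BSS semantics are unchanged. A minimal sketch of the two shapes (the real definitions are in include/linux/cpumask.h):

	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* mask storage lives off-stack; alloc_cpumask_var() must run first */
	typedef struct cpumask *cpumask_var_t;
	#else
	/* mask storage is inline; the [1] lets the name decay to a pointer */
	typedef struct cpumask cpumask_var_t[1];
	#endif

Either way, per_cpu(cpu_sibling_map, i) now evaluates to something usable as a struct cpumask *, which is why the hunks below consistently drop the explicit `&`.
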
@@ -115,11 +115,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 atomic_t init_deasserted;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
-				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
 int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_to_node_map);
@@ -128,7 +123,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 static void map_cpu_to_node(int cpu, int node)
 {
 	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-	cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+	cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = node;
 }
 
@@ -139,7 +134,7 @@ static void unmap_cpu_to_node(int cpu)
 
 	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NUMNODES; node++)
-		cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
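
Both NUMA helpers drop the `&` because node_to_cpumask_map is no longer an array of plain cpumask_t in this file: its definition is deleted in the hunk above and, presumably elsewhere in this series, reappears with an element type that yields a struct cpumask * when indexed. A hedged sketch of the assumed companion definition:

	/* assumed replacement definition, provided elsewhere in the series */
	cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

	/* indexing then produces the pointer directly */
	cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
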
@@ -301,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	__flush_tlb_all();
 #endif
 
-	/* This must be done before setting cpu_online_map */
+	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
 
@@ -334,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
 	cpu_idle();
 }
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	struct cpumask *llc = dst->llc_shared_map;
+	*dst = *src;
+	dst->llc_shared_map = llc;
+}
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	*dst = *src;
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
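
The new copy_cpuinfo_x86() helper exists because plain structure assignment stops being safe once llc_shared_map is a pointer: *dst = *src would overwrite the destination's own pointer with the source's, so both CPUs would silently share one mask and the destination's allocation would leak. A sketch of the failure mode it prevents, assuming CONFIG_CPUMASK_OFFSTACK=y:

	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;	/* BUG: c->llc_shared_map now aliases
				 * boot_cpu_data.llc_shared_map; CPU id's
				 * own mask leaks, and later writes to it
				 * scribble on the boot CPU's mask */

Saving and restoring the pointer around the bulk copy, as the helper does, keeps each CPU's mask private; the !OFFSTACK variant needs no such dance because the mask is embedded in the struct and copied by value.
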
@@ -343,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
-	*c = boot_cpu_data;
+	copy_cpuinfo_x86(c, &boot_cpu_data);
 	c->cpu_index = id;
 	if (id != 0)
 		identify_secondary_cpu(c);
@@ -367,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
 				cpumask_set_cpu(i, cpu_core_mask(cpu));
 				cpumask_set_cpu(cpu, cpu_core_mask(i));
-				cpumask_set_cpu(i, &c->llc_shared_map);
-				cpumask_set_cpu(cpu, &o->llc_shared_map);
+				cpumask_set_cpu(i, c->llc_shared_map);
+				cpumask_set_cpu(cpu, o->llc_shared_map);
 			}
 		}
 	} else {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpumask_set_cpu(cpu, &c->llc_shared_map);
+	cpumask_set_cpu(cpu, c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -386,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, &c->llc_shared_map);
-			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, c->llc_shared_map);
+			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -425,12 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	if (sched_mc_power_savings || sched_smt_power_savings)
 		return cpu_core_mask(cpu);
 	else
-		return &c->llc_shared_map;
-}
-
-cpumask_t cpu_coregroup_map(int cpu)
-{
-	return *cpu_coregroup_mask(cpu);
+		return c->llc_shared_map;
 }
 
 static void impress_friends(void)
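
Deleting cpu_coregroup_map() removes the last by-value cpumask return here; with NR_CPUS=4096 that return copied 512 bytes through the caller's stack on every call. Callers are assumed to be converted in the same series, roughly:

	/* old: full copy of the mask onto the caller's stack */
	cpumask_t mask = cpu_coregroup_map(cpu);

	/* new: borrow a pointer to the mask that already exists */
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
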
@@ -897,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-	/* use the read/write pointers to the present and possible maps */
-	cpumask_copy(&cpu_present_map, cpumask_of(0));
-	cpumask_copy(&cpu_possible_map, cpumask_of(0));
+	init_cpu_present(cpumask_of(0));
+	init_cpu_possible(cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
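
init_cpu_present() and init_cpu_possible() replace direct writes through the old read/write map pointers, keeping the present and possible masks writable only via accessors in kernel/cpu.c. A rough sketch of what such an accessor does there (an approximation, not a verbatim quote):

	void init_cpu_present(const struct cpumask *src)
	{
		cpumask_copy(to_cpumask(cpu_present_bits), src);
	}

The net effect in disable_smp() is unchanged: only CPU 0 ends up present and possible.
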
@@ -1031,6 +1037,8 @@ static void __init smp_cpu_index_default(void)
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+	unsigned int i;
+
 	preempt_disable();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
|
|
boot_cpu_logical_apicid = logical_smp_processor_id();
|
|
boot_cpu_logical_apicid = logical_smp_processor_id();
|
|
#endif
|
|
#endif
|
|
current_thread_info()->cpu = 0; /* needed? */
|
|
current_thread_info()->cpu = 0; /* needed? */
|
|
|
|
+ for_each_possible_cpu(i) {
|
|
|
|
+ alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
|
|
|
|
+ alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
|
|
|
|
+ alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
|
|
|
|
+ cpumask_clear(per_cpu(cpu_core_map, i));
|
|
|
|
+ cpumask_clear(per_cpu(cpu_sibling_map, i));
|
|
|
|
+ cpumask_clear(cpu_data(i).llc_shared_map);
|
|
|
|
+ }
|
|
set_cpu_sibling_map(0);
|
|
set_cpu_sibling_map(0);
|
|
|
|
|
|
enable_IR_x2apic();
|
|
enable_IR_x2apic();
|
|
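
Each alloc_cpumask_var() is paired with an explicit cpumask_clear() because the allocator does not zero the storage: with CONFIG_CPUMASK_OFFSTACK=n it is effectively a no-op that returns true, and with it on, the kmalloc'd memory is uninitialized. Ignoring the return values is tolerable only because these are one-time GFP_KERNEL allocations at early boot; stricter code would check them. A possible later cleanup, assuming zalloc_cpumask_var() is available, folds each alloc+clear pair into one call:

	for_each_possible_cpu(i) {
		/* allocates and zeroes in one step when OFFSTACK=y,
		 * and just zeroes the inline storage otherwise */
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
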
@@ -1132,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj