@@ -329,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
 	cpu_idle();
 }
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	struct cpumask *llc = dst->llc_shared_map;
+	*dst = *src;
+	dst->llc_shared_map = llc;
+}
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	*dst = *src;
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -338,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
-	*c = boot_cpu_data;
+	copy_cpuinfo_x86(c, &boot_cpu_data);
 	c->cpu_index = id;
 	if (id != 0)
 		identify_secondary_cpu(c);
@@ -362,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
 				cpumask_set_cpu(i, cpu_core_mask(cpu));
 				cpumask_set_cpu(cpu, cpu_core_mask(i));
-				cpumask_set_cpu(i, &c->llc_shared_map);
-				cpumask_set_cpu(cpu, &o->llc_shared_map);
+				cpumask_set_cpu(i, c->llc_shared_map);
+				cpumask_set_cpu(cpu, o->llc_shared_map);
 			}
 		}
 	} else {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpumask_set_cpu(cpu, &c->llc_shared_map);
+	cpumask_set_cpu(cpu, c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -381,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, &c->llc_shared_map);
-			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, c->llc_shared_map);
+			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -420,7 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	if (sched_mc_power_savings || sched_smt_power_savings)
 		return cpu_core_mask(cpu);
 	else
-		return &c->llc_shared_map;
+		return c->llc_shared_map;
 }
 
 static void impress_friends(void)
@@ -1039,8 +1056,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	for_each_possible_cpu(i) {
 		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
 		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 		cpumask_clear(per_cpu(cpu_core_map, i));
 		cpumask_clear(per_cpu(cpu_sibling_map, i));
+		cpumask_clear(cpu_data(i).llc_shared_map);
 	}
 	set_cpu_sibling_map(0);
 