@@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_possible_early_cpu(cpu)
 		if (node == node_cpuid[cpu].nid)
 			n++;
|
@@ -143,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_early_cpu(cpu) {
 		if (node == node_cpuid[cpu].nid) {
 			memcpy(__va(cpu_data), __phys_per_cpu_start,
 			       __per_cpu_end - __per_cpu_start);
@@ -346,7 +346,7 @@ static void __init initialize_pernode_data(void)
 
 #ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
@@ -494,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
 	int cpu;
 	static int first_time = 1;
 
-
-	if (smp_processor_id() != 0)
-		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
 	if (first_time) {
 		first_time = 0;
-		for (cpu = 0; cpu < NR_CPUS; cpu++)
+		for_each_possible_early_cpu(cpu)
 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 	}