Преглед изворног кода

x86/non-x86: percpu, node ids, apic ids x86.git fixup

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Mike Travis пре 17 година
родитељ
комит
dd5af90a7f
4 измењених фајлова са 13 додато и 13 уклоњено
  1. 1 1
      arch/x86/Kconfig
  2. 2 10
      include/asm-generic/percpu.h
  3. 2 2
      init/main.c
  4. 8 0
      kernel/module.c

+ 1 - 1
arch/x86/Kconfig

@@ -97,7 +97,7 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	bool
 	default X86_64
 	default X86_64
 
 
-config ARCH_SETS_UP_PER_CPU_AREA
+config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64
 	def_bool X86_64
 
 
 config ARCH_SUPPORTS_OPROFILE
 config ARCH_SUPPORTS_OPROFILE

+ 2 - 10
include/asm-generic/percpu.h

@@ -47,7 +47,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #endif
 #endif
 
 
 /*
 /*
- * A percpu variable may point to a discarded reghions. The following are
+ * A percpu variable may point to a discarded regions. The following are
  * established ways to produce a usable pointer from the percpu variable
  * established ways to produce a usable pointer from the percpu variable
  * offset.
  * offset.
  */
  */
@@ -59,18 +59,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
 
 
 
-#ifdef CONFIG_ARCH_SETS_UP_PER_CPU_AREA
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
 extern void setup_per_cpu_areas(void);
 #endif
 #endif
 
 
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size)			\
-do {								\
-	unsigned int __i;					\
-	for_each_possible_cpu(__i)				\
-		memcpy((pcpudst)+per_cpu_offset(__i),		\
-		       (src), (size));				\
-} while (0)
 #else /* ! SMP */
 #else /* ! SMP */
 
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))

+ 2 - 2
init/main.c

@@ -363,7 +363,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
 
 #else
 #else
 
 
-#ifndef CONFIG_ARCH_SETS_UP_PER_CPU_AREA
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 
 
 EXPORT_SYMBOL(__per_cpu_offset);
 EXPORT_SYMBOL(__per_cpu_offset);
@@ -384,7 +384,7 @@ static void __init setup_per_cpu_areas(void)
 		ptr += size;
 		ptr += size;
 	}
 	}
 }
 }
-#endif /* CONFIG_ARCH_SETS_UP_CPU_AREA */
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
 
 
 /* Called by boot processor to activate the rest. */
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 static void __init smp_init(void)

+ 8 - 0
kernel/module.c

@@ -430,6 +430,14 @@ static unsigned int find_pcpusec(Elf_Ehdr *hdr,
 	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
 	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
 }
 }
 
 
+static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+}
+
 static int percpu_modinit(void)
 static int percpu_modinit(void)
 {
 {
 	pcpu_num_used = 2;
 	pcpu_num_used = 2;