
powerpc: Optimise smp_wmb

Change 2d1b2027626d5151fff8ef7c06ca8e7876a1a510 ("powerpc: Fixup
lwsync at runtime") removed __SUBARCH_HAS_LWSYNC, causing smp_wmb to
revert to eieio for all CPUs.  This restores the behaviour
introduced in 74f0609526afddd88bef40b651da24f3167b10b2 ("powerpc:
Optimise smp_wmb on 64-bit processors").

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Nick Piggin, 16 years ago
parent
commit 46d075be58
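
For context, the effect of restoring __SUBARCH_HAS_LWSYNC can be sketched as the following simplified view of how smp_wmb() gets selected. This is illustrative only: the real tree routes LWSYNC through asm/synch.h and the lwsync runtime-fixup machinery rather than hard-coding the mnemonic.

    /* simplified sketch, not the kernel source itself */
    #if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
    #define __SUBARCH_HAS_LWSYNC
    #endif

    #ifdef __SUBARCH_HAS_LWSYNC
    /* lwsync orders store-store (plus load-load and load-store) and is
     * cheaper than a full sync, so it is preferred where available */
    #define smp_wmb()   __asm__ __volatile__ ("lwsync" : : : "memory")
    #else
    /* CPUs without a usable lwsync keep using eieio */
    #define smp_wmb()   __asm__ __volatile__ ("eieio" : : : "memory")
    #endif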

+ 4 - 0
arch/powerpc/include/asm/synch.h

@@ -5,6 +5,10 @@
 #include <linux/stringify.h>
 #include <asm/feature-fixups.h>
 
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,

+ 2 - 2
arch/powerpc/include/asm/system.h

@@ -45,14 +45,14 @@
 #ifdef CONFIG_SMP
 
 #ifdef __SUBARCH_HAS_LWSYNC
-#    define SMPWMB      lwsync
+#    define SMPWMB      LWSYNC
 #else
 #    define SMPWMB      eieio
 #endif
 
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
+#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
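
A note on the stringify change above: SMPWMB now expands to the LWSYNC macro from asm/synch.h, which on some configurations is a multi-token fixup sequence rather than a single mnemonic, so the asm string has to be built after full macro expansion in C context. The sketch below is an assumed, simplified rendering of the stringify_in_c helper as found in asm/asm-compat.h of that era; the exact definition there may differ.

    /* assumed definition, simplified from asm/asm-compat.h */
    #define __stringify_in_c(...)  #__VA_ARGS__
    /* expand the arguments first (coping with commas in the expansion),
     * then stringify and append a space so the result can prefix further asm text */
    #define stringify_in_c(...)    __stringify_in_c(__VA_ARGS__) " "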