x86: Simplify flush_write_buffers()

Always make it an inline function instead of using a macro for the no-op case.
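
For context, the pattern being applied is the usual one for conditional no-op helpers: fold the #if into the body of a single static inline so callers always see a real function and there is only one definition to maintain, while the empty body still compiles away. A minimal standalone sketch of the before/after shapes, with a hypothetical NEED_FENCE symbol and a plain compiler barrier standing in for the kernel's locked add:

    /* Before: two definitions; the "off" case is a macro that expands to nothing. */
    #ifdef NEED_FENCE
    static inline void fence(void)
    {
    	asm volatile("" : : : "memory");	/* real barrier would go here */
    }
    #else
    #define fence() do { } while (0)
    #endif

    /* After (alternative to the block above, not meant to be compiled together
     * with it): one inline; with NEED_FENCE unset the body is empty and the
     * compiler emits no code, but fence() still behaves like a function at
     * every call site. */
    static inline void fence(void)
    {
    #ifdef NEED_FENCE
    	asm volatile("" : : : "memory");
    #endif
    }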

Signed-off-by: Brian Gerst <brgerst@gmail.com>
LKML-Reference: <1265380629-3212-7-git-send-email-brgerst@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Brian Gerst, 15 years ago · commit 910bf6ad0b

 arch/x86/include/asm/io_32.h | 10 ++--------
 arch/x86/include/asm/io_64.h |  8 +++++++-
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
--- a/arch/x86/include/asm/io_32.h
+++ b/arch/x86/include/asm/io_32.h
@@ -84,18 +84,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
  *	2. Accidentally out of order processors (PPro errata #51)
  */
 
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
 static inline void flush_write_buffers(void)
 {
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
 	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#else
-
-#define flush_write_buffers() do { } while (0)
-
 #endif
+}
 
 #endif /* __KERNEL__ */
 

diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
--- a/arch/x86/include/asm/io_64.h
+++ b/arch/x86/include/asm/io_64.h
@@ -83,7 +83,13 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
  *	1. Out of order aware processors
  *	2. Accidentally out of order processors (PPro errata #51)
  */
-#define flush_write_buffers() do { } while (0)
+
+static inline void flush_write_buffers(void)
+{
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
+#endif
+}
 
 #endif /* __KERNEL__ */
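
With both headers providing the same static inline, call sites need no conditional of their own; on configurations without CONFIG_X86_OOSTORE or CONFIG_X86_PPRO_FENCE the call simply compiles to nothing. A minimal usage sketch, not part of the patch, with a hypothetical function name and doorbell offset:

    /* Hypothetical driver snippet using the kernel's I/O helpers. */
    #include <linux/io.h>

    static void push_block(void __iomem *win, const void *buf, size_t len)
    {
    	memcpy_toio(win, buf, len);	/* stream the payload into the device window */
    	flush_write_buffers();		/* no-op unless OOSTORE/PPRO_FENCE is configured */
    	writel(1, win + 0x40);		/* hypothetical doorbell register */
    }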