Browse code

x86: more header fixes

Summary: Add missing include guards for some x86 headers.

This has only had the most rudimentary testing, but is hopefully obviously
correct.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Vegard Nossum 17 years ago
parent
commit
0db125c467
4 changed files with 16 additions and 0 deletions
  1. 1 0
      include/asm-x86/seccomp_64.h
  2. 5 0
      include/asm-x86/suspend_32.h
  3. 5 0
      include/asm-x86/xor_32.h
  4. 5 0
      include/asm-x86/xor_64.h

+ 1 - 0
include/asm-x86/seccomp_64.h

@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/thread_info.h>
 

+ 5 - 0
include/asm-x86/suspend_32.h

@@ -3,6 +3,9 @@
  * Based on code
  * Copyright 2001 Patrick Mochel <mochel@osdl.org>
  */
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
 #include <asm/desc.h>
 #include <asm/i387.h>
 
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
 #endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */

+ 5 - 0
include/asm-x86/xor_32.h

@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do {							\
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST)			\
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */

+ 5 - 0
include/asm-x86/xor_64.h

@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do {						\
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */