x86: put movsl_mask into uaccess.h.

x86_64 does not need it, but it won't have X86_INTEL_USERCOPY
defined either, so the guarded declaration simply compiles away there.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author: Glauber Costa
Commit: 8bc7de0c5d

 include/asm-x86/uaccess.h    | 9 +++++++++
 include/asm-x86/uaccess_32.h | 9 ---------
 2 files changed, 9 insertions(+), 9 deletions(-)

--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -432,6 +432,15 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+	int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else

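For context, the mask declared above is consumed by the 32-bit user-copy
routines, which check the low address bits of source and destination before
using rep movsl. A minimal sketch of that check, loosely modelled on
__movsl_is_ok() in arch/x86/lib/usercopy_32.c; the 64-byte threshold and the
exact shape of the helper are assumptions here, not part of this commit:

/*
 * Sketch: is a rep-movsl copy likely to be fast here?
 * movsl_mask.mask holds CPU-dependent "bad alignment" bits; if either
 * pointer has any of them set and the copy is large, callers should
 * prefer a different copy strategy.
 */
static inline int movsl_is_ok(unsigned long a1, unsigned long a2,
			      unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 | a2) & movsl_mask.mask))
		return 0;	/* large and misaligned: avoid movsl */
#endif
	return 1;		/* small copy, or alignment is fine */
}
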
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -11,15 +11,6 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-/*
- * movsl can be slow when source and dest are not both 8-byte aligned
- */
-#ifdef CONFIG_X86_INTEL_USERCOPY
-extern struct movsl_mask {
-	int mask;
-} ____cacheline_aligned_in_smp movsl_mask;
-#endif
-
 unsigned long __must_check __copy_to_user_ll
 		(void __user *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll
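
The mask itself is filled in once during CPU identification, so the extern
above needs exactly one definition elsewhere. A hedged sketch of that kind of
setup; the switch arms, the mask value of 7 (8-byte alignment), and the
__read_mostly placement are illustrative assumptions, not lines from this
commit:

#ifdef CONFIG_X86_INTEL_USERCOPY
/* The one definition the extern declaration above refers to. */
struct movsl_mask movsl_mask __read_mostly;

/* struct cpuinfo_x86 comes from <asm/processor.h>. */
static void init_movsl_mask(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 6:
		/*
		 * P6 family: movsl is slow unless both pointers are
		 * 8-byte aligned, so flag the low three address bits.
		 */
		movsl_mask.mask = 7;
		break;
	default:
		movsl_mask.mask = 0;	/* movsl is always acceptable */
		break;
	}
}
#endif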