@@ -3,14 +3,16 @@
 
 #include <linux/bitops.h> /* for LOCK_PREFIX */
 
+/*
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ *       need to test for the feature in boot_cpu_data.
+ */
+
 #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
-
-#ifdef CONFIG_X86_CMPXCHG64
-
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
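For reference, a minimal sketch of the runtime feature test the new comment asks for, using the existing boot_cpu_has()/X86_FEATURE_CX8 helpers; the surrounding function is hypothetical and not part of this patch:

#include <asm/cpufeature.h>	/* boot_cpu_has(), X86_FEATURE_CX8 */

/* Hypothetical caller: CMPXCHG8B is a Pentium-era instruction, so the
 * 80386/80486 lack it and boot_cpu_data must be consulted first. */
static unsigned long long safe_cmpxchg64(volatile unsigned long long *p,
					 unsigned long long old,
					 unsigned long long new)
{
	if (boot_cpu_has(X86_FEATURE_CX8))
		return cmpxchg64(p, old, new);
	/* ...fall back to a locked software path on older CPUs... */
	return old;	/* placeholder for the fallback result */
}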
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
  __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
  __set_64bit(ptr, ll_low(value), ll_high(value)) )
 
-#endif
-
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
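The "no lock prefix" note in the context above is what makes xchg() usable as an atomic test-and-set on SMP; a sketch of that classic idiom, where the lock variable and function names are illustrative and not from this file:

static unsigned long demo_lock;	/* 0 = free, 1 = held; illustrative */

static void demo_spin_lock(void)
{
	/* XCHG is implicitly locked, so each iteration atomically
	 * stores 1 and reads back the previous value. */
	while (xchg(&demo_lock, 1UL) != 0)
		cpu_relax();	/* from <asm/processor.h> */
}

static void demo_spin_unlock(void)
{
	barrier();	/* keep the critical section above the store */
	demo_lock = 0;
}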
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 })
 #endif
 
-#ifdef CONFIG_X86_CMPXCHG64
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
 				      unsigned long long new)
 {
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
 	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
 					(unsigned long long)(n)))
 #endif
-
-#endif
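With the ifdefs gone, __cmpxchg64() and the cmpxchg64() wrapper are always compiled in and callers guard at runtime instead; a sketch of the usual compare-and-swap retry loop built on cmpxchg64(), where the function is illustrative and assumes the X86_FEATURE_CX8 test shown earlier has already passed:

/* Illustrative only: lock-free 64-bit add via the retry loop; must
 * only run on CPUs where boot_cpu_data reports X86_FEATURE_CX8. */
static void demo_add64(volatile unsigned long long *p, unsigned long long inc)
{
	unsigned long long old;

	do {
		old = *p;
	} while (cmpxchg64(p, old, old + inc) != old);
}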