@@ -263,6 +263,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
@@ -291,10 +295,42 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		return cmpxchg_386_u8(ptr, old, new);
+	case 2:
+		return cmpxchg_386_u16(ptr, old, new);
+	case 4:
+		return cmpxchg_386_u32(ptr, old, new);
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)					\
+({								\
+	__typeof__(*(ptr)) __ret;				\
+	if (likely(boot_cpu_data.x86 > 3))			\
+		__ret = __cmpxchg((ptr), (unsigned long)(o),	\
+				(unsigned long)(n), sizeof(*(ptr))); \
+	else							\
+		__ret = cmpxchg_386((ptr), (unsigned long)(o),	\
+				(unsigned long)(n), sizeof(*(ptr))); \
+	__ret;							\
+})
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64