|
@@ -93,6 +93,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
|
|
|
return old;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Like __cmpxchg(), but the LOCK prefix is always emitted: memory that
|
|
|
+ * is shared with a hypervisor must use locked operations, since the
|
|
|
+ * host may be SMP even if the guest kernel itself isn't.
|
|
|
+ */
|
|
|
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
|
|
|
+ unsigned long old,
|
|
|
+ unsigned long new, int size)
|
|
|
+{
|
|
|
+ unsigned long prev;
|
|
|
+ switch (size) {
|
|
|
+ case 1:
|
|
|
+ asm volatile("lock; cmpxchgb %b1,%2"
|
|
|
+ : "=a"(prev)
|
|
|
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
|
|
|
+ : "memory");
|
|
|
+ return prev;
|
|
|
+ case 2:
|
|
|
+ asm volatile("lock; cmpxchgw %w1,%2"
|
|
|
+ : "=a"(prev)
|
|
|
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
|
|
|
+ : "memory");
|
|
|
+ return prev;
|
|
|
+ case 4:
|
|
|
+ asm volatile("lock; cmpxchgl %1,%2"
|
|
|
+ : "=a"(prev)
|
|
|
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
|
|
|
+ : "memory");
|
|
|
+ return prev;
|
|
|
+ }
|
|
|
+ return old;
|
|
|
+}
|
|
|
+
|
|
|
static inline unsigned long __cmpxchg_local(volatile void *ptr,
|
|
|
unsigned long old,
|
|
|
unsigned long new, int size)
|
|
@@ -139,6 +172,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
|
|
|
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
|
|
|
(unsigned long)(n), \
|
|
|
sizeof(*(ptr))))
|
|
|
+#define sync_cmpxchg(ptr, o, n) /* always locked: for hypervisor-shared memory */ \
|
|
|
+ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
|
|
|
+ (unsigned long)(n), \
|
|
|
+ sizeof(*(ptr))))
|
|
|
#define cmpxchg64_local(ptr, o, n) \
|
|
|
({ \
|
|
|
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|