@@ -3,6 +3,14 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -14,8 +22,9 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg64(&lockref->lock_count,		\
-					   old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
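
For context, the CMPXCHG_LOOP() macro patched above is consumed by the lockref operations later in lib/lockref.c. The sketch below shows the general shape of such a caller; it is an illustration only, not part of this diff, and it assumes the struct lockref definition from <linux/lockref.h> and the spinlock fallback path used elsewhere in the file.

/*
 * Sketch of a CMPXCHG_LOOP() caller (not part of this patch): the CODE
 * argument mutates the speculative copy "new", and SUCCESS runs only when
 * the cmpxchg64_relaxed() above observed no concurrent modification.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Lockless update failed or the lock was held: fall back to the spinlock. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}

On architectures that do not define cmpxchg64_relaxed, the new #ifndef fallback aliases it to cmpxchg64, so callers like the one sketched above keep the existing fully ordered behaviour; only architectures that supply a relaxed variant drop the memory barriers from this update path.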