@@ -388,12 +388,9 @@ do { \
#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-/*
- * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
- * faster than an xchg with forced lock semantics.
- */
-#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -485,6 +482,8 @@ do { \
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
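
For reference, a minimal sketch (not part of the patch above) of how a generic __this_cpu_xchg() can dispatch on operand size to the suffixed helpers these hunks define. The dispatcher name __pcpu_xchg_dispatch is hypothetical, and the __bad_size_call_parameter() fallback is an assumption about the surrounding per-cpu plumbing in include/linux/percpu-defs.h, not code taken from this diff:

/*
 * Hypothetical sizeof()-based dispatcher: illustrates how the _1/_2/_4/_8
 * helpers can be selected at compile time.  The real dispatch macros live
 * in include/linux/percpu-defs.h and may differ in detail.
 */
#define __pcpu_xchg_dispatch(pcp, nval)				\
({								\
	typeof(pcp) __ret;					\
	switch (sizeof(pcp)) {					\
	case 1: __ret = __this_cpu_xchg_1(pcp, nval); break;	\
	case 2: __ret = __this_cpu_xchg_2(pcp, nval); break;	\
	case 4: __ret = __this_cpu_xchg_4(pcp, nval); break;	\
	case 8: __ret = __this_cpu_xchg_8(pcp, nval); break;	\
	default: __bad_size_call_parameter(); break;		\
	}							\
	__ret;							\
})

A caller would then write something like old = __pcpu_xchg_dispatch(counter, new) and get the 1-, 2-, 4- or 8-byte variant resolved entirely at compile time; the same switch-on-sizeof pattern would route a cmpxchg to the __this_cpu_cmpxchg_8() helper added in the second hunk.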