@@ -177,39 +177,6 @@ do { \
 	}						\
 } while (0)
 
-/*
- * Add return operation
- */
-#define percpu_add_return_op(var, val)				\
-({								\
-	typeof(var) paro_ret__ = val;				\
-	switch (sizeof(var)) {					\
-	case 1:							\
-		asm("xaddb %0, "__percpu_arg(1)			\
-			    : "+q" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	case 2:							\
-		asm("xaddw %0, "__percpu_arg(1)			\
-			    : "+r" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	case 4:							\
-		asm("xaddl %0, "__percpu_arg(1)			\
-			    : "+r" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	case 8:							\
-		asm("xaddq %0, "__percpu_arg(1)			\
-			    : "+re" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	default: __bad_percpu_size();				\
-	}							\
-	paro_ret__ += val;					\
-	paro_ret__;						\
-})
-
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -262,6 +229,39 @@ do { \
 	}						\
 })
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)				\
+({								\
+	typeof(var) paro_ret__ = val;				\
+	switch (sizeof(var)) {					\
+	case 1:							\
+		asm("xaddb %0, "__percpu_arg(1)			\
+			    : "+q" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 2:							\
+		asm("xaddw %0, "__percpu_arg(1)			\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 4:							\
+		asm("xaddl %0, "__percpu_arg(1)			\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 8:							\
+		asm("xaddq %0, "__percpu_arg(1)			\
+			    : "+re" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	default: __bad_percpu_size();				\
+	}							\
+	paro_ret__ += val;					\
+	paro_ret__;						\
+})
+
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
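The block moved above implements add-return with a single xadd: the
instruction exchanges the register operand with the old memory value
while storing their sum, so paro_ret__ comes back holding the *old*
contents and the macro adds val once more to yield the new value. A
minimal plain-C sketch of those semantics (illustrative only:
pseudo_add_return is a hypothetical name, and a real per-cpu access
goes through the segment-prefixed __percpu_arg(), not a plain pointer):

static inline long pseudo_add_return(long *pcp, long val)
{
	long old = *pcp;	/* xadd leaves the old value in the register */

	*pcp = old + val;	/* xadd stores old + val back to memory */
	return old + val;	/* paro_ret__ += val; paro_ret__; */
}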
@@ -352,6 +352,7 @@ do { \
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -359,14 +360,12 @@ do { \
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-
-#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
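With both the preemption-checked and plain 8-byte variants wired to
percpu_add_return_op, callers get a single-instruction add-and-fetch on
x86-64. A usage sketch, assuming the generic this_cpu_add_return()
wrapper from this series (note_event and nr_events are illustrative
names, not part of the patch):

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, nr_events);

static long note_event(void)
{
	/*
	 * Add 1 to this CPU's counter and return the new value,
	 * without an explicit preempt_disable()/enable() pair.
	 */
	return this_cpu_add_return(nr_events, 1);
}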