@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
 #define __local_add(i, l)	local_add((i), (l))
 #define __local_sub(i, l)	local_sub((i), (l))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-({					\
-	local_t res__;			\
-	preempt_disable();		\
-	res__ = (l);			\
-	preempt_enable();		\
-	res__;				\
-})
-#define cpu_local_wrap(l)		\
-({					\
-	preempt_disable();		\
-	(l);				\
-	preempt_enable();		\
-})					\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l)	cpu_local_inc((l))
-#define __cpu_local_dec(l)	cpu_local_dec((l))
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ASM_X86_LOCAL_H */
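
Callers that used the removed wrappers can get the same preempt-safe update from the primitives that remain. A minimal sketch, not part of this patch (the counter name and function below are made up for illustration):

  #include <linux/percpu.h>
  #include <asm/local.h>

  /* Hypothetical per-cpu event counter, for illustration only. */
  static DEFINE_PER_CPU(local_t, evt_count);

  static void note_event(void)
  {
  	/*
  	 * get_cpu_var() disables preemption, so the task cannot migrate
  	 * and touch another CPU's copy mid-update; put_cpu_var()
  	 * re-enables it.  This is effectively what cpu_local_inc()
  	 * did through cpu_local_wrap().
  	 */
  	local_inc(&get_cpu_var(evt_count));
  	put_cpu_var(evt_count);
  }

Where full local_t semantics are not needed, a plain per-cpu counter updated with this_cpu_inc() is the simpler replacement.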