local.h

#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/asm.h>

typedef struct {
        atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)   { ATOMIC_LONG_INIT(i) }

#define local_read(l)   atomic_long_read(&(l)->a)
#define local_set(l, i) atomic_long_set(&(l)->a, (i))

static inline void local_inc(local_t *l)
{
        asm volatile(_ASM_INC "%0"
                     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
        asm volatile(_ASM_DEC "%0"
                     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
        asm volatile(_ASM_ADD "%1,%0"
                     : "+m" (l->a.counter)
                     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
        asm volatile(_ASM_SUB "%1,%0"
                     : "+m" (l->a.counter)
                     : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_SUB "%2,%0; sete %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : "ir" (i) : "memory");
        return c;
}
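
/*
 * Usage sketch (not part of the original header; 'tx_budget', 'nr_pkts' and
 * refill_budget() are hypothetical): consuming a batch of credits from a
 * per-CPU budget and detecting when it is exhausted.
 *
 *      DEFINE_PER_CPU(local_t, tx_budget);
 *
 *      if (local_sub_and_test(nr_pkts, &__get_cpu_var(tx_budget)))
 *              refill_budget();        (budget hit exactly zero)
 */
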
/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_DEC "%0; sete %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : : "memory");
        return c != 0;
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int local_inc_and_test(local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_INC "%0; sete %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : : "memory");
        return c != 0;
}
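
/*
 * Usage sketch (illustrative only; 'obj', its 'refs' field and release_obj()
 * are hypothetical): the classic "drop a reference, free on the last put"
 * pattern built on local_dec_and_test().
 *
 *      if (local_dec_and_test(&obj->refs))
 *              release_obj(obj);
 */
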
/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int local_add_negative(long i, local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_ADD "%2,%0; sets %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : "ir" (i) : "memory");
        return c;
}
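
/*
 * Usage sketch (hypothetical names): adjusting a signed balance and reacting
 * as soon as it drops below zero.
 *
 *      if (local_add_negative(-cost, &acct->balance))
 *              throttle();             (the balance just went negative)
 */
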
/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
        long __i;
#ifdef CONFIG_M386
        unsigned long flags;
        if (unlikely(boot_cpu_data.x86 <= 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        __i = i;
        asm volatile(_ASM_XADD "%0, %1;"
                     : "+r" (i), "+m" (l->a.counter)
                     : : "memory");
        return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
        local_irq_save(flags);
        __i = local_read(l);
        local_set(l, i + __i);
        local_irq_restore(flags);
        return i + __i;
#endif
}
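
/*
 * Usage sketch (hypothetical names): local_add_return() yields the new value,
 * which makes it handy for claiming a slot in a per-CPU ring.
 *
 *      long tail = local_add_return(1, &ring->tail) - 1;
 *      ring->slots[tail & (RING_SIZE - 1)] = entry;
 */
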
static inline long local_sub_return(long i, local_t *l)
{
        return local_add_return(-i, l);
}

#define local_inc_return(l)  (local_add_return(1, l))
#define local_dec_return(l)  (local_sub_return(1, l))

#define local_cmpxchg(l, o, n) \
        (cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)                               \
({                                                              \
        long c, old;                                            \
        c = local_read((l));                                    \
        for (;;) {                                              \
                if (unlikely(c == (u)))                         \
                        break;                                  \
                old = local_cmpxchg((l), c, c + (a));           \
                if (likely(old == c))                           \
                        break;                                  \
                c = old;                                        \
        }                                                       \
        c != (u);                                               \
})

#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
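
/*
 * Usage sketch (hypothetical names): local_inc_not_zero() is the usual
 * "take a reference only if the object is still live" idiom.
 *
 *      if (!local_inc_not_zero(&obj->refs))
 *              return NULL;            (object already on its way out)
 *      return obj;
 */
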
/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)          local_inc(l)
#define __local_dec(l)          local_dec(l)
#define __local_add(i, l)       local_add((i), (l))
#define __local_sub(i, l)       local_sub((i), (l))
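
/*
 * Usage sketch (hypothetical names): the __local_* forms are intended for
 * sections already protected against concurrent access on the owning CPU,
 * e.g. with interrupts disabled.
 *
 *      local_irq_save(flags);
 *      __local_inc(&stats->dropped);
 *      local_irq_restore(flags);
 */
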
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 *
 * X86_64: This could be done better if we moved the per-CPU data directly
 * after GS.
 */

/* Need to disable preemption for the cpu-local counters, otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)             \
({                                      \
        local_t res__;                  \
        preempt_disable();              \
        res__ = (l);                    \
        preempt_enable();               \
        res__;                          \
})
#define cpu_local_wrap(l)               \
({                                      \
        preempt_disable();              \
        (l);                            \
        preempt_enable();               \
})

#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))

#define __cpu_local_inc(l)      cpu_local_inc((l))
#define __cpu_local_dec(l)      cpu_local_dec((l))
#define __cpu_local_add(i, l)   cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)   cpu_local_sub((i), (l))
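
/*
 * Usage sketch (hypothetical variable name): the cpu_local_* helpers take the
 * per-CPU variable itself, not its address, and wrap the access in
 * preempt_disable()/preempt_enable() so the counter of the current CPU is the
 * one that gets updated.
 *
 *      DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *      cpu_local_inc(nr_events);
 *      printk("events on this cpu: %ld\n", cpu_local_read(nr_events));
 */
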
#endif /* _ASM_X86_LOCAL_H */