local.h

#ifndef _ARCH_LOCAL_H
#define _ARCH_LOCAL_H

#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/asm.h>

typedef struct {
        atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)   { ATOMIC_LONG_INIT(i) }

#define local_read(l)   atomic_long_read(&(l)->a)
#define local_set(l, i) atomic_long_set(&(l)->a, (i))
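
/**
 * local_inc - increment local variable
 * @l: pointer to type local_t
 *
 * Atomically increments the counter in @l by 1.
 */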
static inline void local_inc(local_t *l)
{
        __asm__ __volatile__(
                _ASM_INC "%0"
                : "+m" (l->a.counter));
}
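
/**
 * local_dec - decrement local variable
 * @l: pointer to type local_t
 *
 * Atomically decrements the counter in @l by 1.
 */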
static inline void local_dec(local_t *l)
{
        __asm__ __volatile__(
                _ASM_DEC "%0"
                : "+m" (l->a.counter));
}
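
/**
 * local_add - add a value to a local variable
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to the counter in @l.
 */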
static inline void local_add(long i, local_t *l)
{
        __asm__ __volatile__(
                _ASM_ADD "%1,%0"
                : "+m" (l->a.counter)
                : "ir" (i));
}
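
/**
 * local_sub - subtract a value from a local variable
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from the counter in @l.
 */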
static inline void local_sub(long i, local_t *l)
{
        __asm__ __volatile__(
                _ASM_SUB "%1,%0"
                : "+m" (l->a.counter)
                : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
        unsigned char c;

        __asm__ __volatile__(
                _ASM_SUB "%2,%0; sete %1"
                : "+m" (l->a.counter), "=qm" (c)
                : "ir" (i) : "memory");
        return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
        unsigned char c;

        __asm__ __volatile__(
                _ASM_DEC "%0; sete %1"
                : "+m" (l->a.counter), "=qm" (c)
                : : "memory");
        return c != 0;
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int local_inc_and_test(local_t *l)
{
        unsigned char c;

        __asm__ __volatile__(
                _ASM_INC "%0; sete %1"
                : "+m" (l->a.counter), "=qm" (c)
                : : "memory");
        return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int local_add_negative(long i, local_t *l)
{
        unsigned char c;

        __asm__ __volatile__(
                _ASM_ADD "%2,%0; sets %1"
                : "+m" (l->a.counter), "=qm" (c)
                : "ir" (i) : "memory");
        return c;
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
        long __i;
#ifdef CONFIG_M386
        unsigned long flags;
        if (unlikely(boot_cpu_data.x86 <= 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        __i = i;
        __asm__ __volatile__(
                _ASM_XADD "%0, %1;"
                : "+r" (i), "+m" (l->a.counter)
                : : "memory");
        return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
        local_irq_save(flags);
        __i = local_read(l);
        local_set(l, i + __i);
        local_irq_restore(flags);
        return i + __i;
#endif
}
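
/**
 * local_sub_return - subtract and return
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns the new value of @l.
 */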
static inline long local_sub_return(long i, local_t *l)
{
        return local_add_return(-i, l);
}

#define local_inc_return(l)     (local_add_return(1, l))
#define local_dec_return(l)     (local_sub_return(1, l))
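
/* cmpxchg_local() compiles to an unlocked cmpxchg, so local_cmpxchg() is
 * only safe against accesses from this CPU. */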
#define local_cmpxchg(l, o, n) \
        (cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)                       \
({                                                      \
        long c, old;                                    \
        c = local_read(l);                              \
        for (;;) {                                      \
                if (unlikely(c == (u)))                 \
                        break;                          \
                old = local_cmpxchg((l), c, c + (a));   \
                if (likely(old == c))                   \
                        break;                          \
                c = old;                                \
        }                                               \
        c != (u);                                       \
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)          local_inc(l)
#define __local_dec(l)          local_dec(l)
#define __local_add(i, l)       local_add((i), (l))
#define __local_sub(i, l)       local_sub((i), (l))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations. Note they take
 * a variable, not an address.
 *
 * X86_64: This could be done better if we moved the per cpu data directly
 * after GS.
 */

/* Need to disable preemption for the cpu local counters otherwise we could
 * still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)             \
({                                      \
        local_t res__;                  \
        preempt_disable();              \
        res__ = (l);                    \
        preempt_enable();               \
        res__;                          \
})
#define cpu_local_wrap(l)               \
({                                      \
        preempt_disable();              \
        (l);                            \
        preempt_enable();               \
})

#define cpu_local_read(l)       cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
#define cpu_local_set(l, i)     cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
#define cpu_local_inc(l)        cpu_local_wrap(local_inc(&__get_cpu_var(l)))
#define cpu_local_dec(l)        cpu_local_wrap(local_dec(&__get_cpu_var(l)))
#define cpu_local_add(i, l)     cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
#define cpu_local_sub(i, l)     cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))

#define __cpu_local_inc(l)      cpu_local_inc(l)
#define __cpu_local_dec(l)      cpu_local_dec(l)
#define __cpu_local_add(i, l)   cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)   cpu_local_sub((i), (l))
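
/* Example usage (illustrative sketch only, not part of this interface):
 * a per-cpu event counter.  The variable name "events" and the function
 * count_event() are hypothetical.
 *
 *      static DEFINE_PER_CPU(local_t, events) = LOCAL_INIT(0);
 *
 *      static void count_event(void)
 *      {
 *              // cpu_local_inc() disables preemption around the update and
 *              // takes the per-cpu variable itself, not its address.
 *              cpu_local_inc(events);
 *      }
 */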

#endif /* _ARCH_LOCAL_H */