#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		((v)->counter)
#define atomic64_read(v)	((v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))
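
/*
 * The fallback add/sub routines below implement the atomic update as a
 * compare-and-exchange retry loop: snapshot the counter, compute the new
 * value, and publish it with an acquiring cmpxchg; if another CPU changed
 * the counter in the meantime, the cmpxchg fails and the loop retries.
 */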
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ __s64
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ __s64
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}
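
/*
 * atomic_cmpxchg() and atomic_xchg() simply forward to the generic
 * cmpxchg()/xchg() on the embedded counter; the _unless helpers below
 * are built on top of them.
 */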
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
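
/*
 * atomic_add_unless - add to the counter unless it has a given value.
 * Atomically adds @a to @v, as long as @v was not @u.  Returns non-zero
 * if the add was performed, zero otherwise.
 */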
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
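
/*
 * IA-64's fetchadd4/fetchadd8 instructions only accept the immediates
 * -16, -8, -4, -1, 1, 4, 8 and 16.  When the increment is a compile-time
 * constant from that set, a single ia64_fetch_and_add() suffices;
 * anything else falls back to the cmpxchg loop above.
 */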
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}
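
/*
 * The subtract variants negate the constant so that a small decrement
 * can still be expressed as a single fetchadd immediate.
 */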
#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
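
/*
 * asm-generic/atomic.h supplies the atomic_long_t wrappers on top of the
 * operations defined above.
 */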
#include <asm-generic/atomic.h>

#endif /* _ASM_IA64_ATOMIC_H */