atomic.h

#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))
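
/*
 * Usage sketch (illustrative only, not part of the original header),
 * with a hypothetical counter:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);		// plain store
 *	int n = atomic_read(&nr_users);		// volatile load
 *
 * atomic_read()/atomic_set() are single loads/stores: they are atomic
 * with respect to other accesses of the counter, but they imply no
 * memory-ordering guarantees by themselves.
 */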
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}
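
/*
 * Note (added for clarity, not in the original header): all four helpers
 * above use the same compare-and-exchange retry pattern.  Roughly:
 *
 *	do {
 *		old = read the counter;
 *		new = old +/- i;
 *	} while (cmpxchg(counter, old, new) != old);	// retry if we raced
 *
 * ia64_cmpxchg(acq, ...) only stores `new` if the counter still holds
 * `old`; if another CPU modified the counter in between, the returned
 * value differs from `old` and the loop recomputes from the fresh value.
 */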
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
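
/*
 * Note (added for clarity): __atomic_add_unless() adds `a` to *v unless
 * the counter already equals `u`, and returns the value the counter held
 * before the (possible) addition.  In kernels of this vintage it is
 * normally called through the atomic_add_unless()/atomic_inc_not_zero()
 * wrappers in <linux/atomic.h>; that is an assumption about the
 * surrounding tree, not something defined in this file.
 */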
static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
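
/*
 * Note (added for clarity): unlike __atomic_add_unless() above, which
 * returns the old counter value, atomic64_add_unless() returns a boolean:
 * non-zero if the addition was performed, 0 if the counter already held
 * `u`.  Illustrative use with a hypothetical refcount (not from this
 * file):
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object is already being torn down
 */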
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})
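
/*
 * Note (added for clarity): the constant check above exists because the
 * ia64 fetchadd instruction only accepts the immediates -16, -8, -4, -1,
 * 1, 4, 8 and 16.  For those compile-time constants ia64_fetch_and_add()
 * can emit a single fetchadd; any other increment falls back to the
 * cmpxchg-based ia64_atomic_add()/ia64_atomic64_add() helpers above.
 */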
/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}
#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})
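
/*
 * Note (added for clarity): the subtraction variants reuse fetchadd by
 * negating the constant (-__ia64_asr_i); because the accepted set of
 * small immediates is symmetric around zero, the same constant test
 * applies.  Non-constant or out-of-range amounts again fall back to the
 * cmpxchg-based helpers.
 */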
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))
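
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * classic reference-counting pattern built on these primitives, using a
 * hypothetical `struct foo { atomic_t refcnt; ... }`:
 *
 *	void foo_get(struct foo *f)
 *	{
 *		atomic_inc(&f->refcnt);
 *	}
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))	// dropped to zero?
 *			kfree(f);			// last reference gone
 *	}
 */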
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */