#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		((v)->counter)
#define atomic64_read(v)	((v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))
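
/*
 * Example usage (illustrative sketch only; "pkt_count" and "limit"
 * are hypothetical names, not part of this header):
 *
 *	static atomic_t pkt_count = ATOMIC_INIT(0);
 *
 *	atomic_inc(&pkt_count);
 *	if (atomic_read(&pkt_count) > limit)
 *		...
 *
 * Note that atomic_read()/atomic_set() are plain (volatile) accesses;
 * only the read-modify-write operations below are atomic.
 */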

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

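	/*
	 * Plain compare-and-exchange retry loop (the same pattern is
	 * used by the three sibling helpers below): snapshot the
	 * counter, compute the new value, and retry if another CPU
	 * changed the counter between the read and the cmpxchg.
	 */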
	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ __s64
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ __s64
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
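
/*
 * atomic_add_unless(v, a, u) atomically adds "a" to *v as long as *v
 * was not equal to "u", and returns true iff the addition happened.
 * The typical use is atomic_inc_not_zero(), which takes a reference
 * only while the count is still non-zero, e.g. (sketch only; "obj" is
 * a hypothetical refcounted object):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(refcnt had already dropped to zero)
 */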
#define atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
	c != (u);							\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
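
/*
 * For compile-time-constant increments, atomic_add_return() can use a
 * single IA-64 fetchadd instruction, which only accepts the immediate
 * operands -16, -8, -4, -1, 1, 4, 8 and 16; any other value falls
 * back to the cmpxchg loop in ia64_atomic_add().
 */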
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}
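
/*
 * The subtraction variants mirror the add_return macros above:
 * negating a constant from the set {-16,-8,-4,-1,1,4,8,16} stays
 * within that set, so a constant decrement can still be handed to
 * ia64_fetch_and_add() as -i.
 */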
#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))
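
/*
 * Typical release-side usage of the helpers above (sketch only; "sk"
 * and __free_object() are hypothetical):
 *
 *	if (atomic_dec_and_test(&sk->refcnt))
 *		__free_object(sk);
 */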

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */