atomic.h
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)	{ (i) }
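
/*
 * __CS_LOOP() implements an atomic read-modify-write on a 32-bit counter:
 * load the old value, apply op_string (ar/sr/nr/or) to a copy, then try to
 * store the result with COMPARE AND SWAP (cs).  If another CPU modified the
 * counter in the meantime, cs fails and the loop retries.  The macro
 * evaluates to the new value.
 */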
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})
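
/*
 * An aligned 32-bit load/store is atomic on s390; barrier() only keeps the
 * compiler from caching or reordering the access.
 */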
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
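
/*
 * atomic_cmpxchg(): a single COMPARE AND SWAP.  %0 holds the expected old
 * value on input and the value actually found in memory on output, so the
 * caller can tell whether the exchange happened by comparing the result
 * with 'old'.
 */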
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}
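
/*
 * atomic_add_unless(): add 'a' to the counter unless it currently holds 'u'.
 * Implemented as an optimistic cmpxchg loop; returns non-zero if the add was
 * performed, zero if the counter already contained 'u'.
 */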
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT
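
/*
 * On 64-bit kernels __CSG_LOOP() mirrors __CS_LOOP() for a 64-bit counter,
 * using the 64-bit instructions lg/lgr and COMPARE AND SWAP (csg) together
 * with the 64-bit arithmetic/logic ops agr/sgr/ngr/ogr.
 */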
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */
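
/*
 * On 31-bit kernels there are no 64-bit general registers, so 64-bit
 * atomics are emulated with an even/odd register pair (register_pair) and
 * COMPARE DOUBLE AND SWAP (cds): lm/stm move the 64-bit value between
 * memory and the register pair, and cds performs the atomic 8-byte update.
 */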
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}
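
/*
 * atomic64_xchg(): load the current 64-bit value into a register pair and
 * keep retrying cds until the new value is stored without interference;
 * the pair then holds the previous contents, which are returned.
 */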
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}
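
/*
 * The remaining 64-bit operations are built on top of atomic64_cmpxchg():
 * read the counter, compute the new value, and retry until the cmpxchg
 * observes the value that was read.
 */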
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */
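
/*
 * atomic64_add_unless(): same cmpxchg loop as the 32-bit atomic_add_unless(),
 * shared by both the 64-bit and the 31-bit implementation above.
 */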
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
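
/*
 * Barrier hooks used by common code around atomic_inc()/atomic_dec();
 * they map to full SMP memory barriers here.
 */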
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */