/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */
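
/*
 * A minimal usage sketch of the resource-counting case mentioned above
 * (illustrative only, not part of the original header; get_resource(),
 * put_resource() and free_the_resource() are made-up names):
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	void get_resource(void)
 *	{
 *		atomic_inc(&refcount);
 *	}
 *
 *	void put_resource(void)
 *	{
 *		if (atomic_dec_and_test(&refcount))
 *			free_the_resource();	(hypothetical cleanup helper)
 *	}
 */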

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }
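
/*
 * Two implementations of the basic read-modify-write loop: with the z196
 * interlocked-access facility the operation is a single instruction
 * (laa/lan/lao = load and add/and/or) that also returns the old value;
 * on older machines the same effect is built from a load followed by a
 * compare-and-swap (cs) retry loop.
 */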
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"

#define __ATOMIC_LOOP(ptr, op_val, op_string) \
({ \
	int old_val; \
\
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		op_string " %0,%2,%1\n" \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"

#define __ATOMIC_LOOP(ptr, op_val, op_string) \
({ \
	int old_val, new_val; \
\
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		" l %0,%2\n" \
		"0: lr %1,%0\n" \
		op_string " %1,%3\n" \
		" cs %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		" l %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		" st %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}
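
/*
 * On z196 or newer, adding a compile-time constant that fits into a signed
 * byte can be done with a single "add immediate" (asi) to storage; other
 * values fall back to the __ATOMIC_LOOP based atomic_add_return() above.
 */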
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
	} else {
		atomic_add_return(i, v);
	}
#else
	atomic_add_return(i, v);
#endif
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
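
/*
 * atomic_clear_mask() atomically ANDs the counter with ~mask (clearing the
 * bits set in mask); atomic_set_mask() ORs the bits in mask into it.
 */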
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		" cs %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}
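
/*
 * __atomic_add_unless(): add a to v unless the counter already equals u.
 * The old value is returned, so callers can tell whether the add happened.
 */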
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_64BIT
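
/*
 * The 64 bit atomics mirror the 32 bit operations, using either the z196
 * interlocked-access instructions (laag/lang/laog) or a csg based
 * compare-and-swap retry loop.
 */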
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"

#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
({ \
	long long old_val; \
\
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		op_string " %0,%2,%1\n" \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"

#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
({ \
	long long old_val, new_val; \
\
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		" lg %0,%2\n" \
		"0: lgr %1,%0\n" \
		op_string " %1,%3\n" \
		" csg %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		" lg %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		" stg %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		" csg %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */
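
/*
 * On 31 bit the 64 bit instructions are not available, so atomic64_t is
 * emulated with a register pair and "compare double and swap" (cds);
 * the remaining operations are retry loops on top of atomic64_cmpxchg().
 */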
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		" lm %0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		" stm %1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		" lm %0,%N0,%1\n"
		"0: cds %0,%2,%1\n"
		" jl 0b\n"
		: "=&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		" cds %0,%2,%1"
		: "+&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;	/* clear the bits set in mask */
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */
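
/*
 * As in atomic_add(), a small constant increment can use a single
 * "add immediate" (agsi) instruction on z196 or newer machines.
 */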
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
	} else {
		atomic64_add_return(i, v);
	}
#else
	atomic64_add_return(i, v);
#endif
}
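
/*
 * atomic64_add_unless(): add i to v unless the counter equals u;
 * returns non-zero if the add was performed.
 */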
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
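
/*
 * atomic64_dec_if_positive(): decrement the counter unless the result would
 * be negative. Returns the decremented value; if the counter was left
 * unchanged, the returned value is negative.
 */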
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
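
/*
 * Barriers used by generic code around atomic_inc()/atomic_dec(); on this
 * architecture they expand to full memory barriers.
 */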
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */