/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/config.h>
#include <linux/types.h>
#include <asm/spr-regs.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

typedef struct {
        int counter;
} atomic_t;

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
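
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * declaring and initialising an atomic counter. The variable name
 * "pending" is hypothetical.
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_set(&pending, 5);
 *	printk("pending = %d\n", atomic_read(&pending));
 */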

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0: \n"
            " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
            " ckeq icc3,cc7 \n"
            " ld.p %M0,%1 \n"			/* LD.P/ORCR must be atomic */
            " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
            " add%I2 %1,%2,%1 \n"
            " cst.p %1,%M0 ,cc3,#1 \n"
            " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* clear ICC3.Z if store happens */
            " beq icc3,#0,0b \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0: \n"
            " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
            " ckeq icc3,cc7 \n"
            " ld.p %M0,%1 \n"			/* LD.P/ORCR must be atomic */
            " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
            " sub%I2 %1,%2,%1 \n"
            " cst.p %1,%M0 ,cc3,#1 \n"
            " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* clear ICC3.Z if store happens */
            " beq icc3,#0,0b \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif
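
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * atomic_add_return() and atomic_sub_return() return the *new* value of
 * the counter, so the updated total can be inspected without a separate
 * atomic_read(). The names "stats" and "bytes" are hypothetical.
 *
 *	static atomic_t stats = ATOMIC_INIT(0);
 *
 *	int total = atomic_add_return(bytes, &stats);
 *	if (atomic_sub_return(bytes, &stats) == 0)
 *		;	// counter has returned to zero
 */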

static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
        atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
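
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * a typical reference-count pattern built on these helpers. "obj",
 * "obj_get", "obj_put" and "obj_free" are hypothetical names.
 *
 *	struct obj {
 *		atomic_t refcount;
 *	};
 *
 *	static void obj_get(struct obj *o)
 *	{
 *		atomic_inc(&o->refcount);
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_free(o);	// last reference dropped
 *	}
 */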

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
            "0: \n"
            " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
            " ckeq icc3,cc7 \n"
            " ld.p %M0,%1 \n"			/* LD.P/ORCR are atomic */
            " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
            " and%I3 %1,%3,%2 \n"
            " cst.p %2,%M0 ,cc3,#1 \n"		/* if store happens... */
            " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* ... clear ICC3.Z */
            " beq icc3,#0,0b \n"
            : "+U"(*v), "=&r"(old), "=r"(tmp)
            : "NPr"(~mask)
            : "memory", "cc7", "cc3", "icc3"
            );

        return old;
}

static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
            "0: \n"
            " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
            " ckeq icc3,cc7 \n"
            " ld.p %M0,%1 \n"			/* LD.P/ORCR are atomic */
            " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
            " or%I3 %1,%3,%2 \n"
            " cst.p %2,%M0 ,cc3,#1 \n"		/* if store happens... */
            " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* ... clear ICC3.Z */
            " beq icc3,#0,0b \n"
            : "+U"(*v), "=&r"(old), "=r"(tmp)
            : "NPr"(mask)
            : "memory", "cc7", "cc3", "icc3"
            );

        return old;
}

static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
            "0: \n"
            " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
            " ckeq icc3,cc7 \n"
            " ld.p %M0,%1 \n"			/* LD.P/ORCR are atomic */
            " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
            " xor%I3 %1,%3,%2 \n"
            " cst.p %2,%M0 ,cc3,#1 \n"		/* if store happens... */
            " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* ... clear ICC3.Z */
            " beq icc3,#0,0b \n"
            : "+U"(*v), "=&r"(old), "=r"(tmp)
            : "NPr"(mask)
            : "memory", "cc7", "cc3", "icc3"
            );

        return old;
}

#else

extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);

#endif

#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
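
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * atomically setting and clearing bits in a flags word. The functions
 * return the old value, so a transition can be detected. "FLAG_BUSY"
 * and "flags" are hypothetical.
 *
 *	static volatile unsigned long flags;
 *
 *	// non-zero if FLAG_BUSY was already set before this call
 *	unsigned long was_busy =
 *		atomic_test_and_OR_mask(FLAG_BUSY, &flags) & FLAG_BUSY;
 *
 *	atomic_clear_mask(FLAG_BUSY, &flags);
 */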

/*****************************************************************************/
/*
 * exchange value with memory
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig; \
 \
        switch (sizeof(__xg_orig)) { \
        case 4: \
                asm volatile( \
                        "swap%I0 %M0,%1" \
                        : "+m"(*__xg_ptr), "=r"(__xg_orig) \
                        : "1"(x) \
                        : "memory" \
                        ); \
                break; \
 \
        default: \
                __xg_orig = 0; \
                asm volatile("break"); \
                break; \
        } \
 \
        __xg_orig; \
})

#else

extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig; \
 \
        switch (sizeof(__xg_orig)) { \
        case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
        default: \
                __xg_orig = 0; \
                asm volatile("break"); \
                break; \
        } \
        __xg_orig; \
})

#endif

#define tas(ptr) (xchg((ptr), 1))
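
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * xchg() returns the previous contents of the location, so it can be
 * used to fetch-and-clear a word in one step; tas() is shorthand for
 * xchg(ptr, 1). "pending_events" and "flag_word" are hypothetical.
 *
 *	static unsigned long pending_events;
 *	static int flag_word;
 *
 *	// grab whatever was pending and reset the word to zero
 *	unsigned long events = xchg(&pending_events, 0);
 *
 *	if (tas(&flag_word) == 0)
 *		;	// the flag was clear and we just set it
 */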

/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr; *ptr is left unchanged
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
        __typeof__(*(ptr)) __xg_test = (test); \
        __typeof__(*(ptr)) __xg_new = (new); \
 \
        switch (sizeof(__xg_orig)) { \
        case 4: \
                asm volatile( \
                        "0: \n" \
                        " orcc gr0,gr0,gr0,icc3 \n" \
                        " ckeq icc3,cc7 \n" \
                        " ld.p %M0,%1 \n" \
                        " orcr cc7,cc7,cc3 \n" \
                        " sub%I4cc %1,%4,%2,icc0 \n" \
                        " bne icc0,#0,1f \n" \
                        " cst.p %3,%M0 ,cc3,#1 \n" \
                        " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
                        " beq icc3,#0,0b \n" \
                        "1: \n" \
                        : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
                        : "r"(__xg_new), "NPr"(__xg_test) \
                        : "memory", "cc7", "cc3", "icc3", "icc0" \
                        ); \
                break; \
 \
        default: \
                __xg_orig = 0; \
                asm volatile("break"); \
                break; \
        } \
 \
        __xg_orig; \
})

#else

extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig; \
        __typeof__(*(ptr)) __xg_test = (test); \
        __typeof__(*(ptr)) __xg_new = (new); \
 \
        switch (sizeof(__xg_orig)) { \
        case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
        default: \
                __xg_orig = 0; \
                asm volatile("break"); \
                break; \
        } \
 \
        __xg_orig; \
})

#endif
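
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * a retry loop built on cmpxchg(). The store only happens if the value
 * still equals the one we read; otherwise the returned old value differs
 * and we try again. "counter" and "clamp_add" are hypothetical.
 *
 *	static int counter;
 *
 *	static int clamp_add(int delta, int limit)
 *	{
 *		int old, new;
 *		do {
 *			old = counter;
 *			new = old + delta;
 *			if (new > limit)
 *				new = limit;
 *		} while (cmpxchg(&counter, old, new) != old);
 *		return new;
 *	}
 */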

#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u) \
({ \
        int c, old; \
        c = atomic_read(v); \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old; \
        c != (u); \
})

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
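
/*
 * Illustrative usage sketch (editorial, not from the FR-V code itself):
 * atomic_add_unless() evaluates to non-zero if it performed the
 * addition, i.e. the counter was not equal to "u". atomic_inc_not_zero()
 * is the usual refcount idiom: only take a new reference if the object
 * has not already dropped to zero. "obj" is a hypothetical structure
 * with an atomic_t refcount member.
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// object already dying, do not touch it
 */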

#include <asm-generic/atomic.h>

#endif /* _ASM_ATOMIC_H */