/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/spr-regs.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

typedef struct {
        int counter;
} atomic_t;

#define ATOMIC_INIT(i)          { (i) }
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0:                                         \n"
            "   orcc    gr0,gr0,gr0,icc3                \n"     /* set ICC3.Z */
            "   ckeq    icc3,cc7                        \n"
            "   ld.p    %M0,%1                          \n"     /* LD.P/ORCR must be atomic */
            "   orcr    cc7,cc7,cc3                     \n"     /* set CC3 to true */
            "   add%I2  %1,%2,%1                        \n"
            "   cst.p   %1,%M0          ,cc3,#1         \n"
            "   corcc   gr29,gr29,gr0   ,cc3,#1         \n"     /* clear ICC3.Z if store happens */
            "   beq     icc3,#0,0b                      \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0:                                         \n"
            "   orcc    gr0,gr0,gr0,icc3                \n"     /* set ICC3.Z */
            "   ckeq    icc3,cc7                        \n"
            "   ld.p    %M0,%1                          \n"     /* LD.P/ORCR must be atomic */
            "   orcr    cc7,cc7,cc3                     \n"     /* set CC3 to true */
            "   sub%I2  %1,%2,%1                        \n"
            "   cst.p   %1,%M0          ,cc3,#1         \n"
            "   corcc   gr29,gr29,gr0   ,cc3,#1         \n"     /* clear ICC3.Z if store happens */
            "   beq     icc3,#0,0b                      \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif
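
/*
 * Usage sketch (illustrative only; 'nr_users' and first_user_setup() are
 * hypothetical): atomic_add_return() yields the counter value *after* the
 * addition, so a caller can update and test in a single atomic step:
 *
 *      atomic_t nr_users = ATOMIC_INIT(0);
 *
 *      if (atomic_add_return(1, &nr_users) == 1)
 *              first_user_setup();
 */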

static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
        atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
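
/*
 * Usage sketch (illustrative; 'thing' and free_thing() are hypothetical):
 * the *_and_test() forms report whether the counter reached zero, which is
 * the usual way to drop the last reference to a refcounted object:
 *
 *      if (atomic_dec_and_test(&thing->refcount))
 *              free_thing(thing);
 */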

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
            "0:                                         \n"
            "   orcc    gr0,gr0,gr0,icc3                \n"     /* set ICC3.Z */
            "   ckeq    icc3,cc7                        \n"
            "   ld.p    %M0,%1                          \n"     /* LD.P/ORCR are atomic */
            "   orcr    cc7,cc7,cc3                     \n"     /* set CC3 to true */
            "   and%I3  %1,%3,%2                        \n"
            "   cst.p   %2,%M0          ,cc3,#1         \n"     /* if store happens... */
            "   corcc   gr29,gr29,gr0   ,cc3,#1         \n"     /* ... clear ICC3.Z */
            "   beq     icc3,#0,0b                      \n"
            : "+U"(*v), "=&r"(old), "=r"(tmp)
            : "NPr"(~mask)
            : "memory", "cc7", "cc3", "icc3"
            );

        return old;
}

static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
            "0:                                         \n"
            "   orcc    gr0,gr0,gr0,icc3                \n"     /* set ICC3.Z */
            "   ckeq    icc3,cc7                        \n"
            "   ld.p    %M0,%1                          \n"     /* LD.P/ORCR are atomic */
            "   orcr    cc7,cc7,cc3                     \n"     /* set CC3 to true */
            "   or%I3   %1,%3,%2                        \n"
            "   cst.p   %2,%M0          ,cc3,#1         \n"     /* if store happens... */
            "   corcc   gr29,gr29,gr0   ,cc3,#1         \n"     /* ... clear ICC3.Z */
            "   beq     icc3,#0,0b                      \n"
            : "+U"(*v), "=&r"(old), "=r"(tmp)
            : "NPr"(mask)
            : "memory", "cc7", "cc3", "icc3"
            );

        return old;
}

static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
            "0:                                         \n"
            "   orcc    gr0,gr0,gr0,icc3                \n"     /* set ICC3.Z */
            "   ckeq    icc3,cc7                        \n"
            "   ld.p    %M0,%1                          \n"     /* LD.P/ORCR are atomic */
            "   orcr    cc7,cc7,cc3                     \n"     /* set CC3 to true */
            "   xor%I3  %1,%3,%2                        \n"
            "   cst.p   %2,%M0          ,cc3,#1         \n"     /* if store happens... */
            "   corcc   gr29,gr29,gr0   ,cc3,#1         \n"     /* ... clear ICC3.Z */
            "   beq     icc3,#0,0b                      \n"
            : "+U"(*v), "=&r"(old), "=r"(tmp)
            : "NPr"(mask)
            : "memory", "cc7", "cc3", "icc3"
            );

        return old;
}

#else

extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);

#endif

#define atomic_clear_mask(mask, v)      atomic_test_and_ANDNOT_mask((mask), (v))
#define atomic_set_mask(mask, v)        atomic_test_and_OR_mask((mask), (v))
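
/*
 * Usage sketch (illustrative; FLAG_BUSY and 'flags' are hypothetical): the
 * mask ops return the *old* word, so a caller can atomically set a bit and
 * see whether it was already set:
 *
 *      static volatile unsigned long flags;
 *      #define FLAG_BUSY 0x00000001UL
 *
 *      if (atomic_test_and_OR_mask(FLAG_BUSY, &flags) & FLAG_BUSY)
 *              return -EBUSY;
 *      ...
 *      atomic_clear_mask(FLAG_BUSY, &flags);
 */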

/*****************************************************************************/
/*
 * exchange value with memory
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig; \
\
        switch (sizeof(__xg_orig)) { \
        case 4: \
                asm volatile( \
                        "swap%I0 %M0,%1" \
                        : "+m"(*__xg_ptr), "=r"(__xg_orig) \
                        : "1"(x) \
                        : "memory" \
                        ); \
                break; \
\
        default: \
                __xg_orig = (__typeof__(__xg_orig))0; \
                asm volatile("break"); \
                break; \
        } \
\
        __xg_orig; \
})

#else

extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig; \
\
        switch (sizeof(__xg_orig)) { \
        case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
        default: \
                __xg_orig = (__typeof__(__xg_orig))0; \
                asm volatile("break"); \
                break; \
        } \
\
        __xg_orig; \
})

#endif

#define tas(ptr) (xchg((ptr), 1))
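
/*
 * Usage sketch (illustrative; 'lock_word' is hypothetical): tas() stores 1
 * and hands back the previous value, so a non-zero result means the flag
 * was already held:
 *
 *      static int lock_word;
 *
 *      while (tas(&lock_word))
 *              cpu_relax();
 *      ... critical section ...
 *      lock_word = 0;
 */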

/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr;
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
        __typeof__(*(ptr)) __xg_test = (test); \
        __typeof__(*(ptr)) __xg_new = (new); \
\
        switch (sizeof(__xg_orig)) { \
        case 4: \
                asm volatile( \
                        "0:                                     \n" \
                        "   orcc        gr0,gr0,gr0,icc3        \n" \
                        "   ckeq        icc3,cc7                \n" \
                        "   ld.p        %M0,%1                  \n" \
                        "   orcr        cc7,cc7,cc3             \n" \
                        "   sub%I4cc    %1,%4,%2,icc0           \n" \
                        "   bne         icc0,#0,1f              \n" \
                        "   cst.p       %3,%M0          ,cc3,#1 \n" \
                        "   corcc       gr29,gr29,gr0   ,cc3,#1 \n" \
                        "   beq         icc3,#0,0b              \n" \
                        "1:                                     \n" \
                        : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
                        : "r"(__xg_new), "NPr"(__xg_test) \
                        : "memory", "cc7", "cc3", "icc3", "icc0" \
                        ); \
                break; \
\
        default: \
                __xg_orig = 0; \
                asm volatile("break"); \
                break; \
        } \
\
        __xg_orig; \
})

#else

extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new) \
({ \
        __typeof__(ptr) __xg_ptr = (ptr); \
        __typeof__(*(ptr)) __xg_orig; \
        __typeof__(*(ptr)) __xg_test = (test); \
        __typeof__(*(ptr)) __xg_new = (new); \
\
        switch (sizeof(__xg_orig)) { \
        case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
        default: \
                __xg_orig = 0; \
                asm volatile("break"); \
                break; \
        } \
\
        __xg_orig; \
})

#endif

#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new)             (xchg(&((v)->counter), new))
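
/*
 * Usage sketch (illustrative; 'v' and 'limit' are hypothetical): the
 * canonical cmpxchg() retry loop reads the old value, computes a new one,
 * and retries if the word changed underneath it (here, from an interrupt
 * handler):
 *
 *      int old, new;
 *
 *      do {
 *              old = atomic_read(v);
 *              new = old < limit ? old + 1 : old;
 *      } while (atomic_cmpxchg(v, old, new) != old);
 */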

#define atomic_add_unless(v, a, u) \
({ \
        int c, old; \
        c = atomic_read(v); \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old; \
        c != (u); \
})

#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)
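
/*
 * Usage sketch (illustrative; 'obj' and its refcount are hypothetical):
 * atomic_add_unless() returns false only if the counter already held 'u',
 * so atomic_inc_not_zero() is the usual way to take a reference on an
 * object that may already be on its way to being freed:
 *
 *      if (!atomic_inc_not_zero(&obj->refcount))
 *              return NULL;
 */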

#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */