/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 * Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/config.h>
#include <linux/types.h>
#include <asm/spr-regs.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
typedef struct {
	int counter;
} atomic_t;

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
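
/*
 * Note on the pattern used by the inline operations below: ORCC sets
 * ICC3.Z, CKEQ copies that test into CC7, and the LD.P/ORCR pair (which
 * must issue together) arms CC3 so that the conditional store and the
 * CORCC that follows only take effect if the sequence ran undisturbed.
 * CORCC clears ICC3.Z once the store has happened, and the trailing BEQ
 * loops back to retry while ICC3.Z is still set.  See
 * Documentation/fujitsu/frv/atomic-ops.txt for the precise rules.
 */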
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0:					\n"
	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq	icc3,cc7		\n"
	    "	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	add%I2	%1,%2,%1		\n"
	    "	cst.p	%1,%M0		,cc3,#1	\n"
	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
	    "	beq	icc3,#0,0b		\n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0:					\n"
	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq	icc3,cc7		\n"
	    "	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	sub%I2	%1,%2,%1		\n"
	    "	cst.p	%1,%M0		,cc3,#1	\n"
	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
	    "	beq	icc3,#0,0b		\n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
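
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): the "resource counting" mentioned at the top of this file is
 * the typical use of the helpers above.  The struct and function names
 * below are hypothetical.
 */
struct example_refcounted {
	atomic_t refcount;
	/* ... object payload ... */
};

static inline void example_ref_init(struct example_refcounted *obj)
{
	atomic_set(&obj->refcount, 1);	/* creator holds one reference */
}

static inline void example_ref_get(struct example_refcounted *obj)
{
	atomic_inc(&obj->refcount);
}

static inline int example_ref_put(struct example_refcounted *obj)
{
	/* non-zero return means the last reference was just dropped and
	 * the caller should free the object */
	return atomic_dec_and_test(&obj->refcount);
}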
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0:					\n"
	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq	icc3,cc7		\n"
	    "	ld.p	%M0,%1			\n"	/* LD.P/ORCR are atomic */
	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	and%I3	%1,%3,%2		\n"
	    "	cst.p	%2,%M0		,cc3,#1	\n"	/* if store happens... */
	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
	    "	beq	icc3,#0,0b		\n"
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(~mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}

static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0:					\n"
	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq	icc3,cc7		\n"
	    "	ld.p	%M0,%1			\n"	/* LD.P/ORCR are atomic */
	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	or%I3	%1,%3,%2		\n"
	    "	cst.p	%2,%M0		,cc3,#1	\n"	/* if store happens... */
	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
	    "	beq	icc3,#0,0b		\n"
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}

static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0:					\n"
	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq	icc3,cc7		\n"
	    "	ld.p	%M0,%1			\n"	/* LD.P/ORCR are atomic */
	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	xor%I3	%1,%3,%2		\n"
	    "	cst.p	%2,%M0		,cc3,#1	\n"	/* if store happens... */
	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
	    "	beq	icc3,#0,0b		\n"
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}

#else

extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);

#endif

#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
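
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): the mask helpers above return the previous value of the word,
 * so a caller can update flag bits and test what was already set in one
 * atomic step.  The helper name below is hypothetical.
 */
static inline int example_test_and_set_bits(volatile unsigned long *flags,
					    unsigned long bits)
{
	/* non-zero return means at least one of the requested bits was
	 * already set before this call */
	return (atomic_test_and_OR_mask(bits, flags) & bits) != 0;
}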
/*****************************************************************************/
/*
 * exchange value with memory
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x)							\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig;					\
									\
	switch (sizeof(__xg_orig)) {					\
	case 1:								\
		asm volatile(						\
			"0:				\n"		\
			"	orcc	gr0,gr0,gr0,icc3 \n"		\
			"	ckeq	icc3,cc7	\n"		\
			"	ldub.p	%M0,%1		\n"		\
			"	orcr	cc7,cc7,cc3	\n"		\
			"	cstb.p	%2,%M0	,cc3,#1	\n"		\
			"	corcc	gr29,gr29,gr0 ,cc3,#1 \n"	\
			"	beq	icc3,#0,0b	\n"		\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig)		\
			: "r"(x)					\
			: "memory", "cc7", "cc3", "icc3"		\
			);						\
		break;							\
									\
	case 2:								\
		asm volatile(						\
			"0:				\n"		\
			"	orcc	gr0,gr0,gr0,icc3 \n"		\
			"	ckeq	icc3,cc7	\n"		\
			"	lduh.p	%M0,%1		\n"		\
			"	orcr	cc7,cc7,cc3	\n"		\
			"	csth.p	%2,%M0	,cc3,#1	\n"		\
			"	corcc	gr29,gr29,gr0 ,cc3,#1 \n"	\
			"	beq	icc3,#0,0b	\n"		\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig)		\
			: "r"(x)					\
			: "memory", "cc7", "cc3", "icc3"		\
			);						\
		break;							\
									\
	case 4:								\
		asm volatile(						\
			"0:				\n"		\
			"	orcc	gr0,gr0,gr0,icc3 \n"		\
			"	ckeq	icc3,cc7	\n"		\
			"	ld.p	%M0,%1		\n"		\
			"	orcr	cc7,cc7,cc3	\n"		\
			"	cst.p	%2,%M0	,cc3,#1	\n"		\
			"	corcc	gr29,gr29,gr0 ,cc3,#1 \n"	\
			"	beq	icc3,#0,0b	\n"		\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig)		\
			: "r"(x)					\
			: "memory", "cc7", "cc3", "icc3"		\
			);						\
		break;							\
									\
	default:							\
		__xg_orig = 0;						\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})

#else

extern uint8_t  __xchg_8 (uint8_t i,  volatile void *v);
extern uint16_t __xchg_16(uint16_t i, volatile void *v);
extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x)							\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig;					\
									\
	switch (sizeof(__xg_orig)) {					\
	case 1: __xg_orig = (__typeof__(*(ptr))) __xchg_8 ((uint8_t) x,  __xg_ptr); break; \
	case 2: __xg_orig = (__typeof__(*(ptr))) __xchg_16((uint16_t) x, __xg_ptr); break; \
	case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
	default:							\
		__xg_orig = 0;						\
		asm volatile("break");					\
		break;							\
	}								\
	__xg_orig;							\
})

#endif

#define tas(ptr) (xchg((ptr), 1))
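
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): tas() returns the previous contents, so a very simple
 * test-and-set flag can be built from it.  This is only a sketch with
 * hypothetical names; real mutual exclusion should use the kernel's
 * locking primitives.
 */
static inline int example_try_claim(volatile int *flag)
{
	/* previous value 0 means this caller is the one who set it to 1 */
	return tas(flag) == 0;
}

static inline void example_unclaim(volatile int *flag)
{
	xchg(flag, 0);		/* atomically hand the flag back */
}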
/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr;
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new)						\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig, __xg_tmp;				\
	__typeof__(*(ptr)) __xg_test = (test);				\
	__typeof__(*(ptr)) __xg_new = (new);				\
									\
	switch (sizeof(__xg_orig)) {					\
	case 1:								\
		asm volatile(						\
			"0:				\n"		\
			"	orcc	gr0,gr0,gr0,icc3 \n"		\
			"	ckeq	icc3,cc7	\n"		\
			"	ldub.p	%M0,%1		\n"		\
			"	orcr	cc7,cc7,cc3	\n"		\
			"	sub%I4	%1,%4,%2	\n"		\
			"	sllcc	%2,#24,gr0,icc0	\n"		\
			"	bne	icc0,#0,1f	\n"		\
			"	cstb.p	%3,%M0	,cc3,#1	\n"		\
			"	corcc	gr29,gr29,gr0 ,cc3,#1 \n"	\
			"	beq	icc3,#0,0b	\n"		\
			"1:				\n"		\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
			: "r"(__xg_new), "NPr"(__xg_test)		\
			: "memory", "cc7", "cc3", "icc3", "icc0"	\
			);						\
		break;							\
									\
	case 2:								\
		asm volatile(						\
			"0:				\n"		\
			"	orcc	gr0,gr0,gr0,icc3 \n"		\
			"	ckeq	icc3,cc7	\n"		\
			"	lduh.p	%M0,%1		\n"		\
			"	orcr	cc7,cc7,cc3	\n"		\
			"	sub%I4	%1,%4,%2	\n"		\
			"	sllcc	%2,#16,gr0,icc0	\n"		\
			"	bne	icc0,#0,1f	\n"		\
			"	csth.p	%3,%M0	,cc3,#1	\n"		\
			"	corcc	gr29,gr29,gr0 ,cc3,#1 \n"	\
			"	beq	icc3,#0,0b	\n"		\
			"1:				\n"		\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
			: "r"(__xg_new), "NPr"(__xg_test)		\
			: "memory", "cc7", "cc3", "icc3", "icc0"	\
			);						\
		break;							\
									\
	case 4:								\
		asm volatile(						\
			"0:				\n"		\
			"	orcc	gr0,gr0,gr0,icc3 \n"		\
			"	ckeq	icc3,cc7	\n"		\
			"	ld.p	%M0,%1		\n"		\
			"	orcr	cc7,cc7,cc3	\n"		\
			"	sub%I4cc %1,%4,%2,icc0	\n"		\
			"	bne	icc0,#0,1f	\n"		\
			"	cst.p	%3,%M0	,cc3,#1	\n"		\
			"	corcc	gr29,gr29,gr0 ,cc3,#1 \n"	\
			"	beq	icc3,#0,0b	\n"		\
			"1:				\n"		\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
			: "r"(__xg_new), "NPr"(__xg_test)		\
			: "memory", "cc7", "cc3", "icc3", "icc0"	\
			);						\
		break;							\
									\
	default:							\
		__xg_orig = 0;						\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})

#else

extern uint8_t  __cmpxchg_8 (uint8_t *v,  uint8_t test,  uint8_t new);
extern uint16_t __cmpxchg_16(uint16_t *v, uint16_t test, uint16_t new);
extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new)						\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig;					\
	__typeof__(*(ptr)) __xg_test = (test);				\
	__typeof__(*(ptr)) __xg_new = (new);				\
									\
	switch (sizeof(__xg_orig)) {					\
	case 1: __xg_orig = __cmpxchg_8 (__xg_ptr, __xg_test, __xg_new); break; \
	case 2: __xg_orig = __cmpxchg_16(__xg_ptr, __xg_test, __xg_new); break; \
	case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
	default:							\
		__xg_orig = 0;						\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})

#endif
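
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): cmpxchg() returns the value actually found at *ptr, so callers
 * typically loop until the compare succeeds.  This hypothetical helper
 * adds to a counter only while the counter is non-zero.
 */
static inline int example_add_unless_zero(atomic_t *v, int a)
{
	int old;

	do {
		old = atomic_read(v);
		if (old == 0)
			return 0;	/* counter already hit zero; nothing added */
	} while (cmpxchg(&v->counter, old, old + a) != old);

	return 1;			/* the addition was applied */
}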
#endif /* _ASM_ATOMIC_H */