#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

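/*
 * All of the operations below are built on the PowerPC load-reserve /
 * store-conditional pair (lwarx/stwcx. for 32-bit, ldarx/stdcx. for
 * 64-bit), retrying the loop ("bne- 1b") until the conditional store
 * succeeds.  The value-returning variants additionally bracket the loop
 * with LWSYNC_ON_SMP and ISYNC_ON_SMP so that they order surrounding
 * memory accesses on SMP; the plain void variants do not.
 */
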
typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <asm/synch.h>
#include <asm/asm-compat.h>

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

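/*
 * Illustrative sketch (the counter and helper names are hypothetical,
 * not part of this header): atomic_inc_and_test() suits counters biased
 * negative so that reaching zero marks the last event, e.g.
 *
 *	atomic_set(&pending, -nr_items);
 *	...
 *	if (atomic_inc_and_test(&pending))
 *		all_items_done();	(this increment brought it to zero)
 */
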
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)			\
({							\
	int c, old;					\
	c = atomic_read(v);				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = atomic_cmpxchg((v), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

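/*
 * Illustrative sketch (obj and its refcnt field are hypothetical, not
 * part of this header): atomic_inc_not_zero() is the usual way to take
 * a reference only while an object is still live:
 *
 *	if (atomic_inc_not_zero(&obj->refcnt))
 *		use(obj);		(we now hold a reference)
 *	else
 *		...			(object is already being torn down)
 */
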
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

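/*
 * Illustrative sketch (obj, refcnt and the free routine are
 * hypothetical): atomic_dec_and_test() implements the classic
 * last-reference check:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);		(last reference dropped)
 */
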
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

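/*
 * Illustrative sketch (free_slots is a hypothetical counter):
 * atomic_dec_if_positive() takes a slot only when one is available,
 * leaving the counter untouched otherwise:
 *
 *	if (atomic_dec_if_positive(&free_slots) >= 0)
 *		...			(got a slot)
 *	else
 *		...			(none free; counter unchanged)
 */
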
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __powerpc64__

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#endif /* __powerpc64__ */

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */