#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
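
/*
 * Editor's note: every operation below is built on the same
 * load-reserved / store-conditional retry loop.  lwarx loads the word
 * and sets a reservation, the new value is computed in a register, and
 * stwcx. stores it back only if the reservation still holds; "bne- 1b"
 * retries when another CPU touched the word in between.
 *
 * PPC405_ERR77() (from the included asm headers) expands to an extra
 * instruction that works around a PowerPC 405 erratum around stwcx. on
 * affected parts, and to nothing elsewhere.  LWSYNC_ON_SMP and
 * ISYNC_ON_SMP (from <asm/synch.h>) expand, roughly, to lwsync/isync
 * barriers on SMP kernels and to nothing on UP builds; that is what
 * gives the *_return variants their ordering semantics while the void
 * variants stay barrier-free.
 */
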
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
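
/*
 * Usage sketch (illustrative only, not part of the original header):
 * use the void form when no result or ordering is needed, and the
 * _return form when the updated value matters.  HIGH_WATERMARK and
 * throttle_writer() are hypothetical.
 *
 *	static atomic_t bytes_pending = ATOMIC_INIT(0);
 *
 *	atomic_add(len, &bytes_pending);	// fire and forget, no barrier
 *	if (atomic_add_return(len, &bytes_pending) > HIGH_WATERMARK)
 *		throttle_writer();		// needs the updated value
 */
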
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
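
/*
 * atomic_cmpxchg()/atomic_xchg() simply wrap the generic cmpxchg()/xchg()
 * helpers pulled in via <asm/system.h>.  A compare-and-swap loop built on
 * top of them might look like this (illustrative sketch only, not part of
 * the original header; atomic_set_max() is hypothetical):
 *
 *	static inline void atomic_set_max(atomic_t *v, int new)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= new)
 *				return;		// nothing to update
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *	}
 */
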
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
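
/*
 * Typical reference-counting pattern built on these primitives
 * (illustrative sketch only, not part of the original header;
 * struct my_object and free_obj() are hypothetical):
 *
 *	struct my_object { atomic_t refcount; ... };
 *
 *	// Take a reference only if the object is still live.
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		obj = NULL;
 *
 *	// Drop a reference; whoever drops the last one frees the object.
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_obj(obj);
 */
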
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
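
/*
 * Note on the constraint above: "=&b" asks for a base register other
 * than r0, because addi treats rA=0 as the literal value 0 rather than
 * the contents of r0.  Callers typically test the result like this
 * (illustrative only):
 *
 *	if (atomic_dec_if_positive(&count) >= 0)
 *		...	// counter was positive and has been decremented
 *	else
 *		...	// counter was already <= 0 and is unchanged
 */
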
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
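
/*
 * These map to a full smp_mb() because the plain atomic_inc()/atomic_dec()
 * above carry no barrier of their own (only the value-returning variants
 * do, via LWSYNC_ON_SMP/ISYNC_ON_SMP), so callers needing ordering around
 * the non-returning ops must ask for it explicitly.
 */
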
#ifdef __powerpc64__

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }
#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
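
/*
 * The 64-bit variants below mirror the 32-bit ones one for one, using
 * the doubleword reservation pair ldarx/stdcx. instead of lwarx/stwcx.
 * No PPC405_ERR77 workaround is needed here: the 405 is a 32-bit core
 * and never builds this __powerpc64__ section.
 */
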
static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

  351. /**
  352. * atomic64_add_unless - add unless the number is a given value
  353. * @v: pointer of type atomic64_t
  354. * @a: the amount to add to v...
  355. * @u: ...unless v is equal to u.
  356. *
  357. * Atomically adds @a to @v, so long as it was not @u.
  358. * Returns non-zero if @v was not @u, and zero otherwise.
  359. */
  360. static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  361. {
  362. long t;
  363. __asm__ __volatile__ (
  364. LWSYNC_ON_SMP
  365. "1: ldarx %0,0,%1 # atomic_add_unless\n\
  366. cmpd 0,%0,%3 \n\
  367. beq- 2f \n\
  368. add %0,%2,%0 \n"
  369. " stdcx. %0,0,%1 \n\
  370. bne- 1b \n"
  371. ISYNC_ON_SMP
  372. " subf %0,%2,%0 \n\
  373. 2:"
  374. : "=&r" (t)
  375. : "r" (&v->counter), "r" (a), "r" (u)
  376. : "cc", "memory");
  377. return t != u;
  378. }
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* __powerpc64__ */

#include <asm-generic/atomic.h>
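
/*
 * <asm-generic/atomic.h> layers the generic atomic_long_t wrappers on
 * top of the operations defined above (atomic64_t on 64-bit builds,
 * atomic_t on 32-bit ones).
 */
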
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */