/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
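
/*
 * Illustrative sketch only (not part of the original header): on SMP the
 * macros above hash the atomic_t's address to one of the ATOMIC_HASH_SIZE
 * spinlocks and bracket a plain C update with it, roughly:
 *
 *	static void example_add(atomic_t *v, int i)	// hypothetical helper
 *	{
 *		unsigned long flags;
 *
 *		_atomic_spin_lock_irqsave(v, flags);	// lock ATOMIC_HASH(v), IRQs off
 *		v->counter += i;			// ordinary update, now race-free
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *	}
 *
 * Two atomic_ts in the same L1 cacheline map to the same lock; on UP builds
 * the same macros degrade to plain IRQ disable/enable.
 */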

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void *ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
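
/*
 * Illustrative usage (sketch, not from the original source): xchg() stores
 * a new value and returns the previous one in a single atomic step, with the
 * operand width taken from sizeof(*(ptr)):
 *
 *	int state = 0;				// hypothetical variable
 *	int old = xchg(&state, 1);		// state becomes 1, old holds the prior value
 *
 * A pointer to a size with no case in __xchg() above (e.g. a 2-byte object)
 * is caught at link time via __xchg_called_with_bad_pointer().
 */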

#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
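
/*
 * Illustrative usage (sketch, not from the original source): the usual
 * compare-and-swap retry loop on top of cmpxchg().  The store only happens
 * if *p still holds the expected old value; otherwise the loop re-reads *p
 * and tries again:
 *
 *	unsigned int old, new_;			// p and FLAG are hypothetical
 *	do {
 *		old = *p;
 *		new_ = old | FLAG;
 *	} while (cmpxchg(p, old, new_) != old);
 */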

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32(ptr, old, new_);
	default:
		return __cmpxchg_local_generic(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
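
/*
 * Illustrative usage (sketch, not from the original source): cmpxchg_local()
 * takes the same arguments as cmpxchg() but is only required to be atomic
 * with respect to the current CPU, which is enough for data never touched
 * from other CPUs (per-cpu counters and the like):
 *
 *	unsigned long seen = my_percpu_counter;		// my_percpu_counter is a placeholder
 *	while (cmpxchg_local(&my_percpu_counter, seen, seen + 1) != seen)
 *		seen = my_percpu_counter;		// lost a race on this CPU, retry
 */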

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
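
/*
 * Illustrative usage (sketch, not from the original source): a common caller
 * pattern is "take a reference unless the count has already dropped to zero".
 * The return value is the old count, so a result of @u (here 0) means the add
 * was skipped:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)	// obj/refcnt are placeholders
 *		return NULL;					// object already being torn down
 */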

#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
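
/*
 * Illustrative usage (sketch, not from the original source): the *_and_test
 * forms report whether the new value is zero, which makes atomic_dec_and_test()
 * the usual last-reference check in refcounting code:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))		// obj/refcnt are placeholders
 *		kfree(obj);				// only the final dropper frees
 */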

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
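
/*
 * Illustrative usage (sketch, not from the original source): unlike
 * __atomic_add_unless() above, atomic64_add_unless() returns a truth value
 * rather than the old counter, so callers test it directly:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt64))	// obj/refcnt64 are placeholders
 *		return NULL;				// count was already zero
 */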

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */