/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
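/* Worked example (illustrative; assumes L1_CACHE_BYTES == 32):
 * for an atomic_t at address 0x1040, 0x1040 / 32 == 0x82, and
 * 0x82 & (ATOMIC_HASH_SIZE - 1) == 0x82 & 3 == 2, so that variable
 * shares __atomic_hash[2] with every other atomic_t whose cacheline
 * index is congruent to 2 mod 4.
 */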
extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
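/* Typical use (sketch, mirroring __atomic_add_return() below):
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... touch v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * On UP the hash lock degenerates to a plain local_irq_save(),
 * which is sufficient because only a local interrupt handler can
 * race with us there.
 */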
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
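/* Example (illustrative; "busy" is a hypothetical int, used only to
 * show the calling pattern): xchg() returns the previous contents,
 * so a test-and-set idiom is
 *
 *	if (xchg(&busy, 1) == 0)
 *		... we won the race and now own the resource ...
 */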
#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
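/* Example (illustrative): the classic compare-and-swap retry loop.
 * cmpxchg() returns the value that was actually in memory, so a
 * hypothetical lock-free increment of an int "val" reads:
 *
 *	int cur, old;
 *	cur = val;
 *	do {
 *		old = cur;
 *		cur = cmpxchg(&val, old, old + 1);
 *	} while (cur != old);
 *
 * atomic_add_unless() below is a real in-tree instance of this loop.
 */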
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32(ptr, old, new_);
	default:
		return __cmpxchg_local_generic(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
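/* Example (illustrative): these wrappers just retarget xchg/cmpxchg at
 * the embedded counter, so
 *
 *	atomic_t a = ATOMIC_INIT(0);
 *	int prev = atomic_cmpxchg(&a, 0, 5);
 *
 * leaves the counter at 5 with prev == 0; a second identical call
 * returns 5 and changes nothing, because the compare fails.
 */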
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
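/* Example (illustrative; "obj" and "refcnt" are hypothetical names):
 * the canonical use of atomic_inc_not_zero() is taking a reference
 * only while the object is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// already on its way to being freed
 */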
#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#else /* CONFIG_64BIT */

#include <asm-generic/atomic64.h>

#endif /* !CONFIG_64BIT */

#include <asm-generic/atomic-long.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */