atomic.h

#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * Generally we do not need to worry about SMP BFIN systems, so we
 * don't have to deal with that here.
 *
 * Tony Kou (tonyko@lineo.ca)  Lineo Inc.  2001
 */
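/*
 * A minimal usage sketch, not part of this header: atomic_t is what the
 * "resource counting" above looks like in practice.  The counter name and
 * helper functions below are hypothetical; only the atomic_* calls are
 * provided by this file.
 *
 *	static atomic_t inflight = ATOMIC_INIT(0);
 *
 *	static void request_submitted(void)
 *	{
 *		atomic_inc(&inflight);		// one more request in flight
 *	}
 *
 *	static void request_completed(void)
 *	{
 *		atomic_dec(&inflight);		// request done
 *	}
 *
 *	static int requests_pending(void)
 *	{
 *		return atomic_read(&inflight);	// snapshot of the counter
 *	}
 */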
#define ATOMIC_INIT(i)	{ (i) }
#define atomic_set(v, i)	(((v)->counter) = i)

#ifdef CONFIG_SMP

#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)

asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);

static inline void atomic_add(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, i);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, -i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, i);
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, -i);
}

static inline void atomic_inc(volatile atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, 1);
}

static inline void atomic_dec(volatile atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, -1);
}

static inline void atomic_clear_mask(int mask, atomic_t *v)
{
	__raw_atomic_clear_asm(&v->counter, mask);
}

static inline void atomic_set_mask(int mask, atomic_t *v)
{
	__raw_atomic_set_asm(&v->counter, mask);
}

static inline int atomic_test_mask(int mask, atomic_t *v)
{
	return __raw_atomic_test_asm(&v->counter, mask);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#else /* !CONFIG_SMP */

#define atomic_read(v)	((v)->counter)

static inline void atomic_add(int i, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;

	local_irq_save(flags);
	v->counter += i;
	__temp = v->counter;
	local_irq_restore(flags);
	return __temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;

	local_irq_save(flags);
	v->counter -= i;
	__temp = v->counter;
	local_irq_restore(flags);
	return __temp;
}

static inline void atomic_inc(volatile atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter++;
	local_irq_restore(flags);
}

static inline void atomic_dec(volatile atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter--;
	local_irq_restore(flags);
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !CONFIG_SMP */

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
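/*
 * A sketch of how the cmpxchg-style primitives above are typically used,
 * not part of this header: retry until the compare-and-exchange observes
 * an unchanged counter.  The clamped-add helper below is hypothetical.
 *
 *	static int atomic_clamp_add(atomic_t *v, int a, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old + a;
 *			if (new > max)
 *				new = max;	// never exceed the cap
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */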
#define atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
	c != (u);							\
})
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
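/*
 * Illustrative only, not part of this header: atomic_add_unless() adds
 * @a to @v unless @v already equals @u, and its result is non-zero iff
 * the add actually happened.  atomic_inc_not_zero() is the usual
 * "take a reference only if the object is still live" building block;
 * the refcount name below is hypothetical.
 *
 *	static int try_get_ref(atomic_t *refcount)
 *	{
 *		// 0 means the count already hit zero and must not be revived
 *		return atomic_inc_not_zero(refcount);
 *	}
 */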
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is zero,
 * or false for all other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)
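/*
 * A sketch of the matching release pattern, assuming a hypothetical
 * struct obj with an atomic_t refcount and an obj_free() destructor
 * (neither is defined in this header):
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		// only the thread dropping the last reference frees the object
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_free(o);
 *	}
 */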
#include <asm-generic/atomic.h>

#endif /* __ARCH_BLACKFIN_ATOMIC__ */