
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens. Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
	"1:	ldrex	%0, [%1]\n"
	"	strex	%0, %2, [%1]\n"
	"	teq	%0, #0\n"
	"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
	"1:	ldrex	%0, [%2]\n"
	"	add	%0, %0, %3\n"
	"	strex	%1, %0, [%2]\n"
	"	teq	%1, #0\n"
	"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
	"1:	ldrex	%0, [%2]\n"
	"	sub	%0, %0, %3\n"
	"	strex	%1, %0, [%2]\n"
	"	teq	%1, #0\n"
	"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);	/* retry only if the store-exclusive failed */

	return oldval;
}
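
/*
 * A minimal sketch of the usual atomic_cmpxchg() retry loop (illustrative
 * only; atomic_max() is a hypothetical helper, not part of this header):
 * read the current value, compute the update, and retry whenever another
 * CPU changed the counter between the read and the compare-and-swap.
 */
static inline void atomic_max(atomic_t *v, int new)
{
	int cur, old;

	cur = atomic_read(v);
	while (cur < new && (old = atomic_cmpxchg(v, cur, new)) != cur)
		cur = old;	/* lost the race; retry with the fresh value */
}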

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
	"1:	ldrex	%0, [%2]\n"
	"	bic	%0, %0, %3\n"
	"	strex	%1, %0, [%2]\n"
	"	teq	%1, #0\n"
	"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * Pre-ARMv6 CPUs lack the exclusive load/store instructions, so the
 * read-modify-write operations below provide atomicity by briefly
 * disabling interrupts (UP only, hence the check above).
 */
#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
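
/*
 * A minimal test-and-set style sketch built on atomic_xchg() (illustrative
 * only; try_claim() is a hypothetical helper, not part of this header):
 * the first caller to swap in 1 sees the old value 0 and wins the claim.
 */
static inline int try_claim(atomic_t *claimed)
{
	return atomic_xchg(claimed, 1) == 0;
}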

/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
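
/*
 * A minimal sketch of the common reference-count pattern built on these
 * operations (illustrative only; struct my_obj, my_obj_get() and
 * my_obj_put() are hypothetical, not part of this header).
 */
struct my_obj {
	atomic_t refcount;	/* initialised with ATOMIC_INIT(1) */
};

static inline void my_obj_get(struct my_obj *obj)
{
	atomic_inc(&obj->refcount);
}

/*
 * Returns non-zero when the last reference was dropped, i.e. when the
 * caller is now responsible for freeing the object.
 */
static inline int my_obj_put(struct my_obj *obj)
{
	return atomic_dec_and_test(&obj->refcount);
}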

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif
#endif