atomic.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))
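
/*
 * With LLSC the RMW routines below are lockless: LLOCK loads the word and
 * marks it for exclusive access, SCOND stores back only if nothing else
 * wrote the location in between, and BNZ retries the sequence on failure.
 * The plain store in atomic_set() above stays safe because it simply makes
 * any concurrent in-flight LLOCK/SCOND sequence fail and retry.
 */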
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
	: "r"(&v->counter), "ir"(i)
	: "cc");
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	sub     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");
}
/* add and also return the new value */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	sub     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	return temp;
}
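
/* Atomically does *addr &= ~mask (BIC is AND-with-inverted-operand) */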
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bic     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(addr), "ir"(mask)
	: "cc");
}
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else
static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif
/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */
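
/*
 * For reference, atomic_ops_lock()/atomic_ops_unlock() are provided by
 * <asm/smp.h>. A rough sketch of what they expand to (exact definitions
 * may differ by kernel version):
 *
 *	UP:	local_irq_save(flags) / local_irq_restore(flags)
 *	SMP:	the same IRQ disabling, plus taking/releasing a single
 *		global arch spinlock shared by all the emulated atomic ops
 */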
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter += i;
	atomic_ops_unlock(flags);
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter -= i;
	atomic_ops_unlock(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	*addr &= ~mask;
	atomic_ops_unlock(flags);
}

#endif /* !CONFIG_ARC_HAS_LLSC */
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
	c;								\
})
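
/*
 * The generic atomic_add_unless() in <linux/atomic.h> wraps the macro
 * above; atomic_inc_not_zero() below is its classic use. An illustrative
 * (hypothetical) refcount grab that cannot resurrect a dying object:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object already on its way out)
 */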
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
#define ATOMIC_INIT(i)	{ (i) }
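
/*
 * e.g. compile-time initialization (variable name is illustrative only):
 *
 *	static atomic_t nr_pending = ATOMIC_INIT(0);
 */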
#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* __KERNEL__ */

#endif	/* _ASM_ARC_ATOMIC_H */