/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else
/*
 * Atomic operations that C can't guarantee for us. Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		(ACCESS_ONCE((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i)	(((v)->counter) = (i))
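
/*
 * Illustrative usage (not part of the original header), assuming a
 * hypothetical counter:
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_widgets, 5);
 *	pr_info("widgets: %d\n", atomic_read(&nr_widgets));
 */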

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	/* Read-modify-write through the atomic operations unit: latch the
	 * counter's address in _AAR, update the value through _ADR, then
	 * check _ASR and retry if the operation was interfered with.
	 */
	asm volatile(
		"1: mov %4,(_AAR,%3) \n"
		" mov (_ADR,%3),%1 \n"
		" add %5,%1 \n"
		" mov %1,(_ADR,%3) \n"
		" mov (_ADR,%3),%0 \n" /* flush */
		" mov (_ASR,%3),%0 \n"
		" or %0,%0 \n"
		" bne 1b \n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	retval = v->counter;
	retval += i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}
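
/*
 * Illustrative usage (not part of the original header): taking a
 * reference and spotting the first taker, assuming a hypothetical
 * object with an atomic_t refcount and a hypothetical helper
 * first_user_setup():
 *
 *	if (atomic_add_return(1, &obj->refcount) == 1)
 *		first_user_setup(obj);
 */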

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	/* Same retry loop through the atomic operations unit as
	 * atomic_add_return(), but subtracting.
	 */
	asm volatile(
		"1: mov %4,(_AAR,%3) \n"
		" mov (_ADR,%3),%1 \n"
		" sub %5,%1 \n"
		" mov %1,(_ADR,%3) \n"
		" mov (_ADR,%3),%0 \n" /* flush */
		" mov (_ASR,%3),%0 \n"
		" or %0,%0 \n"
		" bne 1b \n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	retval = v->counter;
	retval -= i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
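
/*
 * Illustrative usage (not part of the original header): dropping a
 * reference and freeing on the last put, assuming a hypothetical
 * object with an atomic_t refcount:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		kfree(obj);
 */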

#define __atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c;							\
})
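
/*
 * Illustrative semantics (not part of the original header):
 * __atomic_add_unless(v, a, u) adds @a to @v unless @v equals @u, and
 * returns the old value. A "take a reference unless the count has
 * already dropped to zero" sketch, assuming a hypothetical object:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0)
 *		obj_is_safe_to_use(obj);
 */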

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
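
/*
 * Illustrative usage (not part of the original header): claiming a
 * one-shot flag, assuming a hypothetical atomic_t initialised to 0:
 *
 *	if (atomic_cmpxchg(&initialised, 0, 1) == 0)
 *		do_one_time_init();
 */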

/**
 * atomic_clear_mask - Atomically clear bits in memory
 * @mask: Mask of the bits to be cleared
 * @addr: pointer to word in memory
 *
 * Atomically clears the bits set in @mask from the memory word specified.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1: mov %3,(_AAR,%2) \n"
		" mov (_ADR,%2),%0 \n"
		" and %4,%0 \n"
		" mov %0,(_ADR,%2) \n"
		" mov (_ADR,%2),%0 \n" /* flush */
		" mov (_ASR,%2),%0 \n"
		" or %0,%0 \n"
		" bne 1b \n"
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
		: "memory", "cc");
#else
	unsigned long flags;

	mask = ~mask;
	flags = arch_local_cli_save();
	*addr &= mask;
	arch_local_irq_restore(flags);
#endif
}

/**
 * atomic_set_mask - Atomically set bits in memory
 * @mask: Mask of the bits to be set
 * @addr: pointer to word in memory
 *
 * Atomically sets the bits set in @mask in the memory word specified.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1: mov %3,(_AAR,%2) \n"
		" mov (_ADR,%2),%0 \n"
		" or %4,%0 \n"
		" mov %0,(_ADR,%2) \n"
		" mov (_ADR,%2),%0 \n" /* flush */
		" mov (_ASR,%2),%0 \n"
		" or %0,%0 \n"
		" bne 1b \n"
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	*addr |= mask;
	arch_local_irq_restore(flags);
#endif
}
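
/*
 * Illustrative usage (not part of the original header), assuming a
 * hypothetical word of flag bits:
 *
 *	static unsigned long foo_flags;
 *
 *	atomic_set_mask(0x01, &foo_flags);	 (set bit 0)
 *	atomic_clear_mask(0x01, &foo_flags);	 (clear it again)
 */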

/* Atomic operations appear to be already serializing on MN10300, so
 * plain compiler barriers suffice here.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
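
/*
 * Illustrative usage (not part of the original header): making a
 * store visible before a counter drop, assuming hypothetical fields:
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */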

#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */