atomic.h
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
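/*
 * Illustrative usage only (not part of this header), following the
 * "resource counting" note above; nr_users and MAX_USERS are made-up
 * names:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 0);
 *	if (atomic_read(&nr_users) >= MAX_USERS)
 *		return -EBUSY;
 */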
/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif
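/*
 * Roughly what this buys us (illustrative, not actual generated code):
 * with "di" the compiler may emit an immediate-to-memory form such as
 *
 *	addl #4,counter
 *
 * which classic 68k supports but ColdFire does not; with "d" alone the
 * operand is first loaded into a data register:
 *
 *	movel #4,%d0
 *	addl %d0,counter
 */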
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
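/*
 * Illustrative usage only (not part of this header): the classic
 * reference-count "put" pattern; my_obj and my_obj_free are made-up
 * names:
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			my_obj_free(obj);
 *	}
 */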
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
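/*
 * In plain C, the casl retry loops above behave like the following
 * sketch (illustrative only; the real implementation is the inline
 * asm):
 *
 *	do {
 *		old = v->counter;
 *		new = old + i;		(or old - i)
 *	} while (cmpxchg(&v->counter, old, new) != old);
 *	return new;
 */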
#else /* !CONFIG_RMW_INSNS */

/*
 * No casl here, so fall back to briefly disabling local interrupts
 * around the read-modify-write; that is race-free only because, as
 * noted above, there are no SMP m68k systems.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);

	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);

	return prev;
}

#endif /* !CONFIG_RMW_INSNS */
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			: "=d" (c), "+m" (*v)
			: ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			: "=d" (c), "+m" (*v)
			: ASM_DI (i));
	return c != 0;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
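/*
 * Illustrative usage only (not part of this header): these two act on
 * a plain word of flag bits rather than an atomic_t; pending and
 * TIMER_BIT are made-up names:
 *
 *	static unsigned long pending;
 *
 *	atomic_set_mask(1UL << TIMER_BIT, &pending);
 *	atomic_clear_mask(1UL << TIMER_BIT, &pending);
 */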
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
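/*
 * Illustrative usage only (not part of this header): "take a reference
 * unless the count has already dropped to zero", the pattern behind
 * atomic_inc_not_zero(); obj is a made-up name:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0)
 *		... got a reference ...
 */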
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */