atomic.h

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)
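
/*
 * Illustrative usage (not part of this header; all names below are
 * hypothetical): an atomic_t used as a simple resource counter, which
 * is the kind of use the comment above has in mind.
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_widgets);
 *	if (atomic_read(&nr_widgets) > WIDGET_MAX)
 *		widget_throttle();
 */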

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
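
/*
 * Illustrative usage (not part of this header; the struct and function
 * names are hypothetical): the usual reference-count release pattern
 * built on atomic_dec_and_test() -- whichever caller drops the last
 * reference sees the counter hit zero and frees the object.
 *
 *	struct widget {
 *		atomic_t refcnt;
 *	};
 *
 *	void widget_put(struct widget *w)
 *	{
 *		if (atomic_dec_and_test(&w->refcnt))
 *			kfree(w);
 *	}
 */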

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
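
/*
 * A rough C-level sketch of what the casl retry loop above does for
 * atomic_add_return() (assuming a compare-and-swap primitive with
 * cmpxchg() semantics); this only illustrates the retry structure and
 * is not meant to replace the asm:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old + i;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 *	return new;
 *
 * On a failed compare, casl reloads the compare register with the
 * current memory contents, so the loop retries with the value it just
 * observed instead of re-reading memory separately.
 */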

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */
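
/*
 * Without the CAS family of instructions (e.g. on ColdFire parts that
 * lack them), the read-modify-write sequences below are made atomic by
 * disabling local interrupts.  On a uniprocessor m68k that is enough:
 * nothing else can touch the counter while interrupts are off.
 */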

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);

	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);

	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
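
/*
 * Illustrative usage (not part of this header; the flag word and bit
 * names are hypothetical): atomic_set_mask()/atomic_clear_mask() work
 * on a plain unsigned long rather than an atomic_t, e.g. for setting
 * and clearing flag bits in place.
 *
 *	static unsigned long widget_flags;
 *
 *	atomic_set_mask(WF_BUSY, &widget_flags);
 *	...
 *	atomic_clear_mask(WF_BUSY, &widget_flags);
 */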

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
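
/*
 * Illustrative note: the generic atomic_add_unless()/atomic_inc_not_zero()
 * helpers are typically layered on top of this function, e.g. (sketch
 * only, mirroring the usual generic definitions rather than anything in
 * this file):
 *
 *	atomic_add_unless(v, a, u)  =>  __atomic_add_unless(v, a, u) != u
 *	atomic_inc_not_zero(v)      =>  atomic_add_unless(v, 1, 0)
 *
 * i.e. "take a reference only if the count has not already dropped to
 * zero", the common pattern for lookup-side refcounting.
 */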

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */