atomic.h
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
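
/*
 * Usage sketch (illustrative only; the identifier nr_users is made up and
 * is not part of this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);	// plain store, no read-modify-write
 *	if (atomic_read(&nr_users))	// plain load through a volatile pointer
 *		...;
 */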

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
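
/*
 * Sketch of the usual refcount pattern built on atomic_dec_and_test()
 * (illustrative only; my_obj and my_obj_put are made-up names):
 *
 *	struct my_obj {
 *		atomic_t refs;
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refs))	// true when refs hits 0
 *			kfree(obj);
 *	}
 */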

#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
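
/*
 * atomic_add_return() above and atomic_sub_return() below share the same
 * compare-and-swap loop: load the old counter value, compute the new value
 * in %1, then "casl" stores it only if the counter still equals the value
 * cached in %2; if another update raced in, the compare fails, casl refreshes
 * %2 with the value actually in memory, and "jne 1b" retries.
 */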

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
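
/*
 * Sketch of the mask helpers (illustrative only; irq_pending is a made-up
 * variable, not part of this header):
 *
 *	static unsigned long irq_pending;
 *
 *	atomic_set_mask(1UL << 3, &irq_pending);	// or-in bit 3
 *	atomic_clear_mask(1UL << 3, &irq_pending);	// and with ~mask, bit 3 off
 */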

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
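
/*
 * __atomic_add_unless() above returns the value the counter held before the
 * attempt: it keeps retrying the cmpxchg until either the add succeeds or
 * the counter is observed to equal 'u', in which case nothing is written.
 * The generic <linux/atomic.h> layer typically builds atomic_add_unless()
 * and atomic_inc_not_zero() on top of it, roughly (sketch):
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) != 0)
 *		// got a reference: refs was non-zero and has been incremented
 */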

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */