atomic_mm.h

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)
/* Each basic operation is a single read-modify-write instruction. */
static inline void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

/* "seq" sets the flag byte to 0xff if the result was zero, else to 0x00. */
static inline int atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#ifdef CONFIG_RMW_INSNS

/*
 * With read-modify-write instructions available, the *_return variants
 * are built as a compare-and-swap (casl) retry loop.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1: movel %2,%1\n"
                        "   addl %3,%1\n"
                        "   casl %2,%1,%0\n"
                        "   jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1: movel %2,%1\n"
                        "   subl %3,%1\n"
                        "   casl %2,%1,%0\n"
                        "   jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */

/*
 * Without casl, fall back to disabling interrupts around the
 * read-modify-write sequence; on a UP system that is sufficient.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int t;

        local_irq_save(flags);
        t = atomic_read(v);
        t += i;
        atomic_set(v, t);
        local_irq_restore(flags);

        return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int t;

        local_irq_save(flags);
        t = atomic_read(v);
        t -= i;
        atomic_set(v, t);
        local_irq_restore(flags);

        return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        if (prev == old)
                atomic_set(v, new);
        local_irq_restore(flags);

        return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        atomic_set(v, new);
        local_irq_restore(flags);

        return prev;
}

#endif /* !CONFIG_RMW_INSNS */
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v) : "g" (i));
        return c != 0;
}

/* "smi" sets the flag byte to 0xff if the result is negative. */
static inline int atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v) : "g" (i));
        return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
/*
 * Add @a to @v unless @v was @u; returns non-zero if the add was done.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>

#endif /* __ARCH_M68K_ATOMIC__ */
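
/*
 * Illustrative usage sketch (not part of the header itself): a minimal
 * version of the resource counting that the comment at the top of the
 * file alludes to, assuming this header is pulled in via <asm/atomic.h>.
 * The object and function names below (my_buf, my_buf_alloc, my_buf_get,
 * my_buf_put) are hypothetical, chosen only for the example.
 */
#include <linux/slab.h>
#include <asm/atomic.h>

struct my_buf {
        atomic_t refcount;
        void *data;
};

static struct my_buf *my_buf_alloc(void)
{
        struct my_buf *b = kzalloc(sizeof(*b), GFP_KERNEL);

        if (b)
                atomic_set(&b->refcount, 1);	/* caller holds the first reference */
        return b;
}

static struct my_buf *my_buf_get(struct my_buf *b)
{
        /* atomic_inc_not_zero() refuses to take a reference on an object
         * whose count has already dropped to zero. */
        if (!atomic_inc_not_zero(&b->refcount))
                return NULL;
        return b;
}

static void my_buf_put(struct my_buf *b)
{
        /* atomic_dec_and_test() returns non-zero only for the final
         * reference, so exactly one caller frees the object. */
        if (atomic_dec_and_test(&b->refcount))
                kfree(b);
}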