atomic.h

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/config.h>
#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
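
/*
 * Usage sketch (hypothetical caller, not part of this header):
 * a counter that is safe against interrupt handlers on UP m68k.
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&users);
 *	if (atomic_dec_and_test(&users))
 *		release_resource();	(runs only for the last user)
 */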

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
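
/*
 * The *_and_test operations below rely on the m68k Scc instructions:
 * "seq %0" sets the byte %0 to 0xff if the preceding add/sub left the
 * Z (zero) flag set, and to 0x00 otherwise, so "c != 0" reports whether
 * the counter reached zero.
 */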
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
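
/*
 * The two loops above implement add/sub via compare-and-swap:
 * "casl %2,%1,%0" compares %2 (the value observed before the add/sub)
 * with the memory operand; on a match it stores %1 (the updated value)
 * back, otherwise it reloads %2 from memory and "jne 1b" retries.  The
 * new value is left in %1 and returned.  CONFIG_RMW_INSNS is only set
 * when the CPU and bus can use the cas instruction safely (68020 and
 * later; some machines must leave it disabled).
 */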

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);

	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);

	return prev;
}
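
/*
 * Note: the local_irq_save()/local_irq_restore() versions above are
 * only atomic because this port is uniprocessor-only (see the comment
 * at the top of this file); masking interrupts keeps the
 * read-modify-write sequence from being interleaved with an interrupt
 * handler on the same CPU.
 */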

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
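
/*
 * atomic_clear_mask()/atomic_set_mask() operate on a plain word rather
 * than an atomic_t.  A hypothetical caller maintaining a flag word:
 *
 *	static unsigned long flag_word;
 *
 *	atomic_set_mask(0x01, &flag_word);	... set bit 0
 *	atomic_clear_mask(0x01, &flag_word);	... clear bit 0
 */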

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
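
/*
 * atomic_add_unless() adds @a to @v unless @v was @u, and evaluates to
 * non-zero if the addition was performed.  A common pattern (sketch,
 * hypothetical caller) is taking a reference only while an object is
 * still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is already being torn down)
 */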

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>

#endif /* __ARCH_M68K_ATOMIC__ */