atomic_no.h

#ifndef __ARCH_M68KNOMMU_ATOMIC__
#define __ARCH_M68KNOMMU_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)
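
/*
 * Illustrative sketch (not part of the original header): declaring and
 * initialising an atomic counter with the macros above, e.g. for simple
 * resource counting.  The names "active_users", "reset_active_users" and
 * "current_active_users" are hypothetical.
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *
 *	static void reset_active_users(void)
 *	{
 *		atomic_set(&active_users, 0);
 *	}
 *
 *	static int current_active_users(void)
 *	{
 *		return atomic_read(&active_users);
 *	}
 */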

/*
 * ColdFire cannot add/subtract an immediate operand directly to memory, so
 * the operand is forced into a data register ("d"); other m68k parts also
 * accept an immediate ("di").
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
#endif
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
#endif
}

/* Atomically subtracts @i from @v and returns true if the result is zero. */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "d" (i));
#else
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "di" (i));
#endif
	return c != 0;
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	char c;

	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
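
/*
 * Illustrative sketch (not part of the original header): one way to use
 * atomic_inc_and_test() is with a counter biased negative, so that the call
 * which brings it back to zero is told it was the last one.  The names
 * "outstanding" and "complete_one" are hypothetical.
 *
 *	static atomic_t outstanding = ATOMIC_INIT(-3);
 *
 *	static int complete_one(void)
 *	{
 *		return atomic_inc_and_test(&outstanding);
 *	}
 *
 * With the counter initialised to -3, only the third call to complete_one()
 * returns true, because only that increment takes the counter to zero.
 */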

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

/* Atomically decrements @v by 1 and returns true if the result is zero. */
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;

	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
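
/*
 * Illustrative sketch (not part of the original header): the usual
 * reference-count release pattern built on atomic_dec_and_test().  The
 * structure "struct my_obj" and the helper "my_obj_put" are hypothetical.
 *
 *	struct my_obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			kfree(obj);
 *	}
 *
 * The object is freed only by whichever caller drops the final reference,
 * since exactly one decrement can observe the counter reaching zero.
 */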

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

/*
 * On this uniprocessor port the read-modify-write sequence is made atomic
 * simply by disabling interrupts around it.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/*
 * Atomically adds @a to @v, so long as @v was not already @u, using a
 * cmpxchg retry loop.  Returns non-zero if @v was not @u, zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))
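
/*
 * Illustrative sketch (not part of the original header): atomic_inc_not_zero()
 * (built on atomic_add_unless() above) only takes a reference while the count
 * is still non-zero, so a lookup cannot revive an object whose last reference
 * has already been dropped.  The names "struct my_obj" and "my_obj_get" are
 * hypothetical.
 *
 *	static struct my_obj *my_obj_get(struct my_obj *obj)
 *	{
 *		if (atomic_inc_not_zero(&obj->refcnt))
 *			return obj;
 *		return NULL;
 *	}
 *
 * A NULL return means the count was already zero, i.e. the object is on its
 * way to being freed and must not be used.
 */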

#include <asm-generic/atomic.h>

#endif /* __ARCH_M68KNOMMU_ATOMIC__ */