atomic.h
#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/atomic.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 */
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc. (see the illustrative usage sketch below).
 *
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

#include <asm/system.h>
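
/*
 * A minimal usage sketch of atomic_t for resource counting, assuming a
 * caller that tracks users of some shared resource.  The names nr_users,
 * user_attach() and user_detach() are hypothetical and not defined by this
 * header; the operations they use (atomic_inc, atomic_dec_and_test) are
 * provided further below.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	static void user_attach(void)
 *	{
 *		atomic_inc(&nr_users);			// one more user
 *	}
 *
 *	static int user_detach(void)
 *	{
 *		return atomic_dec_and_test(&nr_users);	// true when last user leaves
 *	}
 */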
/*
 * The operations below are made atomic by disabling interrupts around a
 * plain read-modify-write, which serializes them against interrupt
 * handlers on the local CPU.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1,(v))
#define atomic_dec(v)	atomic_sub(1,(v))
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
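
/*
 * A minimal sketch of the usual compare-and-exchange retry loop built on
 * atomic_cmpxchg(): re-read the counter and retry until the swap observes
 * the value the update was based on.  atomic_max_example() is a
 * hypothetical helper, not part of this header; it records the largest
 * value ever passed in.
 *
 *	static inline void atomic_max_example(atomic_t *v, int new)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= new)
 *				return;		// current value already as large
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *	}
 */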
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
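
/*
 * A minimal sketch of the common atomic_inc_not_zero() pattern: take a new
 * reference only while at least one reference is still held, so a refcount
 * that has already dropped to zero is never resurrected.  struct obj and
 * obj_get() are hypothetical names, not defined by this header.
 *
 *	struct obj { atomic_t refcnt; };
 *
 *	static inline int obj_get(struct obj *p)
 *	{
 *		return atomic_inc_not_zero(&p->refcnt);	// 0 => already dying
 *	}
 */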
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
}
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>

#endif /* __ASM_SH64_ATOMIC_H */