atomic.h

#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
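
/*
 * Illustrative sketch, not part of this header: how a caller would
 * typically declare and use an atomic counter ('nr_events' is a
 * hypothetical name).
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 10);
 *	n = atomic_read(&nr_events);
 *
 * atomic_read() and atomic_set() are plain accesses; only the
 * read-modify-write operations below need interrupt protection.
 */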

#include <asm/system.h>

/*
 * Atomicity is provided by disabling local interrupts around each
 * read-modify-write sequence below; the (long *)v casts just address
 * the embedded counter word (int and long are both 32 bits on SH).
 */

static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}
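
/*
 * Illustrative sketch (hypothetical caller, hypothetical names):
 * maintaining a simple statistics counter.
 *
 *	static atomic_t pkt_count = ATOMIC_INIT(0);
 *
 *	atomic_add(batch, &pkt_count);
 *	atomic_sub(dropped, &pkt_count);
 *
 * Since the whole read-modify-write runs with local interrupts
 * disabled, it is safe against interrupt handlers on the same CPU.
 */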

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
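
/*
 * Illustrative sketch: atomic_add_return() yields the new value, so a
 * bump and a limit check can be done as one atomic step ('active' and
 * 'max_active' are hypothetical).
 *
 *	if (atomic_add_return(1, &active) > max_active)
 *		atomic_sub(1, &active);
 *
 * Note the counter briefly overshoots before the rollback.
 * atomic_add_negative() likewise reports whether the result went
 * negative.
 */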

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))
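
/*
 * Illustrative sketch: the classic refcount pattern these helpers
 * serve ('obj' and release_obj() are hypothetical).
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release_obj(obj);
 *
 * atomic_dec_and_test() returns true only for the caller that takes
 * the count to zero, so exactly one path frees the object.
 */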

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
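
/*
 * Illustrative sketch: a compare-and-swap retry loop built on
 * atomic_cmpxchg(), here incrementing 'v' only up to a hypothetical
 * 'limit'.  atomic_cmpxchg() returns the value it found, so the
 * update took effect iff that equals 'old'.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		if (old >= limit)
 *			break;
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 *
 * atomic_xchg() unconditionally installs the new value and hands back
 * the previous one, e.g. old = atomic_xchg(&v, 0) to read-and-clear.
 */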

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
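
/*
 * Illustrative sketch: atomic_inc_not_zero() is the lookup-side guard
 * for objects whose last reference may be dropped concurrently; it
 * only takes a reference if one still exists ('obj' is hypothetical).
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *	return obj;
 *
 * atomic_add_unless() returns nonzero when it performed the addition
 * and zero when the counter already held 'u'.
 */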

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
}
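
/*
 * Illustrative sketch: the mask operations treat the counter as a set
 * of flag bits (the flag values below are hypothetical).
 *
 *	#define F_RUNNING	0x01
 *	#define F_PENDING	0x02
 *
 *	atomic_set_mask(F_RUNNING, &state);
 *	atomic_clear_mask(F_PENDING, &state);
 *
 * Both update the whole word inside one interrupt-disabled section,
 * consistent with the rest of this file.
 */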

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>

#endif /* __ASM_SH_ATOMIC_H */