atomic.h

#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * Generally we do not concern ourselves with SMP BFIN systems,
 * so we don't have to deal with that.
 *
 * Tony Kou (tonyko@lineo.ca)  Lineo Inc.  2001
 */
#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
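/*
 * Example (illustrative only, not part of this header): declaring and
 * using an atomic counter.  The identifier "nr_users" is hypothetical.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);
 *	printk("users: %d\n", atomic_read(&nr_users));
 */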
static __inline__ void atomic_add(int i, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
}
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;

	local_irq_save(flags);
	v->counter += i;
	__temp = v->counter;
	local_irq_restore(flags);

	return __temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;

	local_irq_save(flags);
	v->counter -= i;
	__temp = v->counter;
	local_irq_restore(flags);

	return __temp;
}
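/*
 * Example (illustrative only, not part of this header): the returning
 * variants yield the post-operation value, which makes them suitable for
 * handing out unique IDs.  "next_id" is a hypothetical counter.
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int id = atomic_add_return(1, &next_id);
 */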
static __inline__ void atomic_inc(volatile atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter++;
	local_irq_restore(flags);
}
#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/*
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
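/*
 * Example (illustrative only, not part of this header): taking a
 * reference only while an object is still live, so a racing release
 * cannot be resurrected.  "obj" and "refcnt" are hypothetical.
 *
 *	if (atomic_inc_not_zero(&obj->refcnt)) {
 *		use(obj);
 *		atomic_dec(&obj->refcnt);
 *	}
 */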
static __inline__ void atomic_dec(volatile atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter--;
	local_irq_restore(flags);
}
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
}
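/*
 * Example (illustrative only, not part of this header): using an
 * atomic_t as a word of flag bits.  "status" and FLAG_BUSY are
 * hypothetical.
 *
 *	atomic_set_mask(FLAG_BUSY, &status);
 *	do_work();
 *	atomic_clear_mask(FLAG_BUSY, &status);
 */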
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
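/*
 * Example (illustrative only, not part of this header): the classic
 * reference-count release pattern, where the last holder frees the
 * object.  "obj", "refcnt" and "free_obj" are hypothetical.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */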
#include <asm-generic/atomic.h>

#endif	/* __ARCH_BLACKFIN_ATOMIC__ */