atomic.h

#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/atomic.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

#include <asm/system.h>
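
/*
 * Illustrative sketch, not part of the original header: minimal use of the
 * accessors above.  A counter would typically be declared as
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 * The example_counter_* helper names below are hypothetical.
 */
static __inline__ int example_counter_is_zero(atomic_t *v)
{
        /* atomic_read() expands to a plain load of the volatile counter. */
        return atomic_read(v) == 0;
}

static __inline__ void example_counter_reset(atomic_t *v)
{
        /* atomic_set() expands to a plain store of the new value. */
        atomic_set(v, 0);
}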

/*
 * These operations are made atomic by disabling local interrupts around a
 * plain read-modify-write of the counter, using local_irq_save() and
 * local_irq_restore().
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v += i;
        local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v -= i;
        local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
        unsigned long temp, flags;

        local_irq_save(flags);
        temp = *(long *)v;
        temp += i;
        *(long *)v = temp;
        local_irq_restore(flags);

        return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
        unsigned long temp, flags;

        local_irq_save(flags);
        temp = *(long *)v;
        temp -= i;
        *(long *)v = temp;
        local_irq_restore(flags);

        return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1,(v))
#define atomic_dec(v)	atomic_sub(1,(v))
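
/*
 * Illustrative sketch, not part of the original header: the "resource
 * counting" use mentioned at the top of this file, built on atomic_inc()
 * and atomic_dec_and_test().  struct example_ref and the example_ref_*
 * helpers are hypothetical names.
 */
struct example_ref {
        atomic_t refcount;
};

static __inline__ void example_ref_get(struct example_ref *r)
{
        atomic_inc(&r->refcount);
}

/*
 * Drops one reference; returns non-zero when the count reaches zero and
 * the caller should free the underlying object.
 */
static __inline__ int example_ref_put(struct example_ref *r)
{
        return atomic_dec_and_test(&r->refcount);
}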

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v &= ~mask;
        local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v |= mask;
        local_irq_restore(flags);
}

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
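
/*
 * Illustrative sketch, not part of the original header: callers that need
 * memory ordering around an atomic decrement bracket it with the hooks
 * above, which here expand to a plain compiler barrier().  The
 * example_ordered_dec name is hypothetical.
 */
static __inline__ void example_ordered_dec(atomic_t *v)
{
        smp_mb__before_atomic_dec();	/* order earlier accesses before the dec */
        atomic_dec(v);
        smp_mb__after_atomic_dec();	/* order the dec before later accesses */
}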

#endif /* __ASM_SH64_ATOMIC_H */