/* $Id: atomic.h,v 1.3 2001/07/25 16:15:19 bjornw Exp $ */

#ifndef __ASM_CRIS_ATOMIC__
#define __ASM_CRIS_ATOMIC__

#include <asm/system.h>
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */

#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
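
/*
 * Illustrative note (not part of the original header): the macro above is
 * intended to be used as a memory operand in inline asm, so gcc considers
 * the object behind the pointer itself to be read and written instead of
 * working on some cached alias, e.g. something along the lines of
 *
 *	__asm__ __volatile__("..."
 *		: "=m" (__atomic_fool_gcc(v))
 *		: "m" (__atomic_fool_gcc(v)));
 *
 * It is not needed by the plain C fallbacks below.
 */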
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

/* These should be written in asm but we do it in C for now. */
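/*
 * The C versions rely on this port being uniprocessor: with interrupts
 * disabled, nothing else on the CPU can touch v->counter during the
 * read-modify-write, so each operation is atomic.
 */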
extern __inline__ void atomic_add(int i, volatile atomic_t *v)
{
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	v->counter += i;
	local_irq_restore(flags);
}

extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
{
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	v->counter -= i;
	local_irq_restore(flags);
}

extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
{
	unsigned long flags;
	int retval;

	local_save_flags(flags);
	local_irq_disable();
	retval = (v->counter += i);
	local_irq_restore(flags);
	return retval;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
extern __inline__ int atomic_sub_return(int i, volatile atomic_t *v)
{
	unsigned long flags;
	int retval;

	local_save_flags(flags);
	local_irq_disable();
	retval = (v->counter -= i);
	local_irq_restore(flags);
	return retval;
}

extern __inline__ int atomic_sub_and_test(int i, volatile atomic_t *v)
{
	int retval;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	retval = (v->counter -= i) == 0;
	local_irq_restore(flags);
	return retval;
}

extern __inline__ void atomic_inc(volatile atomic_t *v)
{
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	(v->counter)++;
	local_irq_restore(flags);
}

extern __inline__ void atomic_dec(volatile atomic_t *v)
{
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	(v->counter)--;
	local_irq_restore(flags);
}
extern __inline__ int atomic_inc_return(volatile atomic_t *v)
{
	unsigned long flags;
	int retval;

	local_save_flags(flags);
	local_irq_disable();
	retval = ++(v->counter);	/* return the new value */
	local_irq_restore(flags);
	return retval;
}

extern __inline__ int atomic_dec_return(volatile atomic_t *v)
{
	unsigned long flags;
	int retval;

	local_save_flags(flags);
	local_irq_disable();
	retval = --(v->counter);	/* return the new value */
	local_irq_restore(flags);
	return retval;
}
extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	int retval;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	retval = --(v->counter) == 0;
	local_irq_restore(flags);
	return retval;
}

extern __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	int retval;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();
	retval = ++(v->counter) == 0;
	local_irq_restore(flags);
	return retval;
}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
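
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the resource-counting pattern mentioned at the top of this file.
 * struct my_object and release() are hypothetical names.
 *
 *	struct my_object {
 *		atomic_t refcount;
 *	};
 *
 *	void my_object_init(struct my_object *obj)
 *	{
 *		atomic_set(&obj->refcount, 1);
 *	}
 *
 *	void my_object_get(struct my_object *obj)
 *	{
 *		atomic_inc(&obj->refcount);
 *	}
 *
 *	void my_object_put(struct my_object *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			release(obj);	// last reference dropped
 *	}
 */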
#endif