atomic.h

/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

#define ATOMIC_INIT(i) { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v) ((v)->counter)

#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))

#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
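
/* A minimal usage sketch (illustration only, not part of the original
 * header): the classic refcount pattern these primitives support.
 * release_object() stands in for a caller-supplied cleanup hook.
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	if (!atomic_inc_not_zero(&refcnt))	// object already dying
 *		return -ENOENT;
 *	...
 *	if (atomic_dec_and_test(&refcnt))	// last reference dropped
 *		release_object();		// hypothetical helper
 */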
/* This is the old 24-bit implementation. It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = (i))

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift. It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
#define ATOMIC24_INIT(i) { ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* A nonzero low byte means the lock is held; wait until it
	 * clears, then the arithmetic shift recovers the signed
	 * 24-bit value.
	 */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
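
/* Encoding example (illustrative, not part of the original header):
 * with the lock byte in bits 7:0, atomic24_set(&v, -1) stores
 * 0xffffff00, and atomic24_read() spins until the low byte is clear,
 * then the arithmetic shift recovers -1 (0xffffff00 >> 8 sign-extends
 * to 0xffffffff).
 */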
#endif /* CONFIG_SMP */
static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* Save the return address in %g4 and call the out-of-line
	 * helper (arch/sparc/lib/atomic.S); the delay-slot add biases
	 * %o7 so the helper can jump straight back here and restore
	 * the caller's %o7 from %g4.
	 */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* Same return-address dance as __atomic24_add above. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
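
/* A usage sketch (illustrative, not from the original file): the kind
 * of down()-style fast path the semaphore code can build on these,
 * where a negative result sends the caller to a slow path.
 *
 *	static atomic24_t count = ATOMIC24_INIT(1);
 *
 *	if (atomic24_dec_return(&count) < 0)
 *		do_slow_path();		// hypothetical contention handler
 */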
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#endif /* __KERNEL__ */

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */