/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)          ((v)->counter)

#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)           ((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)           ((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)    (__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)    (__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
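/* Illustrative sketch (not part of the original header): the classic
 * reference-count pattern the *_and_test helpers above exist for.  The
 * object type and put function here are hypothetical.
 */
#if 0	/* illustration only */
struct example_obj {
        atomic_t refcount;
        /* ... payload ... */
};

static inline void example_obj_put(struct example_obj *obj)
{
        /* Drop one reference; whoever drops the last one frees the object. */
        if (atomic_dec_and_test(&obj->refcount))
                kfree(obj);
}
#endif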
/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)          ((v)->counter)
#define atomic24_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
        int ret = v->counter;

        /* A nonzero low byte means another CPU holds the embedded lock;
         * reread until it is released.  The arithmetic shift then strips
         * the lock byte while keeping the counter's sign.
         */
        while (ret & 0xff)
                ret = v->counter;

        return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))

#endif
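/* Worked example of the SMP encoding above (illustration only, not part
 * of the original header).  Because the counter lives in bits 31..8, a
 * negative value survives the round trip through the lock byte as long
 * as >> on a signed int is an arithmetic shift, which the kernel assumes.
 */
#if 0	/* illustration only */
static inline void atomic24_encoding_example(void)
{
        atomic24_t v = ATOMIC24_INIT(-1);	/* counter == 0xffffff00 */

        /* 0xffffff00 >> 8 == 0xffffffff == -1: the stored value comes
         * back with its sign intact and the lock byte stripped.
         */
        BUG_ON((v.counter >> 8) != -1);
}
#endif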
static inline int __atomic24_add(int i, atomic24_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g7");

        ptr = &v->counter;
        increment = i;

        /* The out-of-line routine takes the counter address in %g1 and
         * the increment in %g2, and hands the new value back in %g2.
         * %o7 is pre-biased by 8 in the call's delay slot: the routine
         * saves the caller's return address via %g4 and comes back with
         * a plain "jmpl %o7, %g0" (see arch/sparc/lib/atomic.S).
         */
        __asm__ __volatile__(
        "mov	%%o7, %%g4\n\t"
        "call	___atomic24_add\n\t"
        " add	%%o7, 8, %%o7\n"
        : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
        : "memory", "cc");

        return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g7");

        ptr = &v->counter;
        increment = i;

        /* Same calling convention and return-address trick as
         * __atomic24_add() above.
         */
        __asm__ __volatile__(
        "mov	%%o7, %%g4\n\t"
        "call	___atomic24_sub\n\t"
        " add	%%o7, 8, %%o7\n"
        : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
        : "memory", "cc");

        return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
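/* Illustrative sketch (not part of the original header): a try-acquire
 * built on atomic24_add_negative(), in the spirit of the old sparc
 * semaphore code this type served.  The function name is hypothetical.
 */
#if 0	/* illustration only */
static inline int example_try_acquire(atomic24_t *count)
{
        /* Take one unit; if the count went negative we overdrew, so put
         * the unit back and report failure.
         */
        if (atomic24_add_negative(-1, count)) {
                atomic24_inc(count);
                return 0;
        }
        return 1;
}
#endif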
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */