/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>      /* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#define __raw_spin_is_locked(lp)        ((lp)->lock != 0)
#define __raw_spin_unlock_wait(lp)      \
        do {    rmb();                  \
        } while ((lp)->lock)
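
/* The lock routines below take the byte with a single ldstub (atomic
 * test-and-set) on the fast path.  If the lock was already held, they
 * branch to a spin loop kept in .subsection 2 which re-reads the byte
 * with plain loads until it appears free and only then retries the
 * ldstub, so waiters keep the cache line shared instead of bouncing it
 * around with stores.
 */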
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldstub          [%1], %0\n"
"       membar          #StoreLoad | #StoreStore\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldub            [%1], %0\n"
"       membar          #LoadLoad\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 1b\n"
"       .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        unsigned long result;

        __asm__ __volatile__(
"       ldstub          [%1], %0\n"
"       membar          #StoreLoad | #StoreStore"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}
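
/* A sketch of how the trylock/unlock pair above might be exercised
 * (illustrative only; "my_lock" is a hypothetical spinlock_t, and real
 * callers normally reach these through the generic spin_trylock()/
 * spin_unlock() wrappers rather than the __raw_* functions directly):
 *
 *      if (__raw_spin_trylock(&my_lock.raw_lock)) {
 *              ... critical section ...
 *              __raw_spin_unlock(&my_lock.raw_lock);
 *      }
 */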
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __asm__ __volatile__(
"       membar          #StoreStore | #LoadStore\n"
"       stb             %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}
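
/* Like __raw_spin_lock(), but while spinning the caller's saved
 * interrupt level (operand %3, the flags argument) is written to %pil
 * so interrupts can be serviced during the wait; the %pil value saved
 * in %1 is restored in the branch delay slot just before the ldstub is
 * retried.
 */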
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:     ldstub          [%2], %0\n"
"       membar          #StoreLoad | #StoreStore\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     rdpr            %%pil, %1\n"
"       wrpr            %3, %%pil\n"
"3:     ldub            [%2], %0\n"
"       membar          #LoadLoad\n"
"       brnz,pt         %0, 3b\n"
"        nop\n"
"       ba,pt           %%xcc, 1b\n"
"        wrpr           %1, %%pil\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (flags)
        : "memory");
}
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
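
/* Layout of the rwlock word as used by the routines below: readers
 * atomically increment the low bits with cas, while a writer sets bit
 * 31 (the 0x80000000 mask).  Readers therefore treat a negative value
 * (signed load) as "writer active" and spin, and a writer waits for
 * the whole word to drop to zero before claiming it.
 */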
static inline void __read_lock(raw_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__ (
"1:     ldsw            [%2], %0\n"
"       brlz,pn         %0, 2f\n"
"4:      add            %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       membar          #StoreLoad | #StoreStore\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldsw            [%2], %0\n"
"       membar          #LoadLoad\n"
"       brlz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
static inline void __read_unlock(raw_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"       membar          #StoreLoad | #LoadLoad\n"
"1:     lduw            [%2], %0\n"
"       sub             %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%xcc, 1b\n"
"        nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
static inline void __write_lock(raw_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"1:     lduw            [%2], %0\n"
"       brnz,pn         %0, 2f\n"
"4:      or             %0, %3, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       membar          #StoreLoad | #StoreStore\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     lduw            [%2], %0\n"
"       membar          #LoadLoad\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}
static inline void __write_unlock(raw_rwlock_t *lock)
{
        __asm__ __volatile__(
"       membar          #LoadStore | #StoreStore\n"
"       stw             %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}
static inline int __write_trylock(raw_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"       mov             0, %2\n"
"1:     lduw            [%3], %0\n"
"       brnz,pn         %0, 2f\n"
"        or             %0, %4, %1\n"
"       cas             [%3], %0, %1\n"
"       cmp             %0, %1\n"
"       membar          #StoreLoad | #StoreStore\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       mov             1, %2\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}
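
/* The generic __raw_* rwlock names used by the rest of the kernel are
 * simply mapped onto the helpers above; only read_trylock falls back
 * to the kernel's generic C implementation.
 */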
#define __raw_read_lock(p)      __read_lock(p)
#define __raw_read_unlock(p)    __read_unlock(p)
#define __raw_write_lock(p)     __write_lock(p)
#define __raw_write_unlock(p)   __write_unlock(p)
#define __raw_write_trylock(p)  __write_trylock(p)
#define __raw_read_trylock(lock)        generic__raw_read_trylock(lock)

#define __raw_read_can_lock(rw)         (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)        (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */