
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>      /* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue. Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code. They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants. The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#define __raw_spin_is_locked(lp)        ((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)      \
        do {    rmb();                  \
        } while((lp)->lock)
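
/* Acquire: ldstub atomically reads the lock byte and stores 0xff into it,
 * so a zero result means we took the lock.  Otherwise we fall through to
 * the out-of-line loop in .subsection 2, which reads with a plain ldub
 * until the byte clears and then retries the ldstub.  The membar orders
 * the locked store ahead of the critical section's loads and stores.
 */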
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
        "1: ldstub [%1], %0\n"
        "   membar #StoreLoad | #StoreStore\n"
        "   brnz,pn %0, 2f\n"
        "   nop\n"
        "   .subsection 2\n"
        "2: ldub [%1], %0\n"
        "   membar #LoadLoad\n"
        "   brnz,pt %0, 2b\n"
        "   nop\n"
        "   ba,a,pt %%xcc, 1b\n"
        "   .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}
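
/* Trylock: a single ldstub attempt, no spinning.  Returns 1 if the old
 * byte was zero (lock acquired), 0 if it was already held.
 */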
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        unsigned long result;

        __asm__ __volatile__(
        "   ldstub [%1], %0\n"
        "   membar #StoreLoad | #StoreStore"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}
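
/* Release: the membar makes the critical section's loads and stores
 * complete before the byte store of zero that drops the lock.
 */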
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __asm__ __volatile__(
        "   membar #StoreStore | #LoadStore\n"
        "   stb %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}
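
/* Like __raw_spin_lock(), but while spinning it saves the current %pil,
 * drops back to the interrupt level the caller passed in 'flags', and
 * restores the raised %pil just before retrying the ldstub.  That way
 * interrupts keep getting serviced while we wait for a contended lock.
 */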
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
        "1: ldstub [%2], %0\n"
        "   membar #StoreLoad | #StoreStore\n"
        "   brnz,pn %0, 2f\n"
        "   nop\n"
        "   .subsection 2\n"
        "2: rdpr %%pil, %1\n"
        "   wrpr %3, %%pil\n"
        "3: ldub [%2], %0\n"
        "   membar #LoadLoad\n"
        "   brnz,pt %0, 3b\n"
        "   nop\n"
        "   ba,pt %%xcc, 1b\n"
        "   wrpr %1, %%pil\n"
        "   .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r"(lock), "r"(flags)
        : "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
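
/* Reader acquire: the lock word is a reader count with 0x80000000 acting
 * as the writer bit.  Load the signed word; if it is negative a writer
 * holds the lock, so spin out of line until it goes non-negative, then
 * cas in count + 1.  A failed cas (the word changed underneath us) just
 * retries.
 */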
static void inline __read_lock(raw_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__ (
        "1: ldsw [%2], %0\n"
        "   brlz,pn %0, 2f\n"
        "4: add %0, 1, %1\n"
        "   cas [%2], %0, %1\n"
        "   cmp %0, %1\n"
        "   membar #StoreLoad | #StoreStore\n"
        "   bne,pn %%icc, 1b\n"
        "   nop\n"
        "   .subsection 2\n"
        "2: ldsw [%2], %0\n"
        "   membar #LoadLoad\n"
        "   brlz,pt %0, 2b\n"
        "   nop\n"
        "   ba,a,pt %%xcc, 4b\n"
        "   .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
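
/* Reader trylock: return 0 immediately if a writer holds the lock (the
 * word is negative); otherwise cas in count + 1, retrying only on cas
 * failure, and return 1 on success.
 */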
static int inline __read_trylock(raw_rwlock_t *lock)
{
        int tmp1, tmp2;

        __asm__ __volatile__ (
        "1: ldsw [%2], %0\n"
        "   brlz,a,pn %0, 2f\n"
        "   mov 0, %0\n"
        "   add %0, 1, %1\n"
        "   cas [%2], %0, %1\n"
        "   cmp %0, %1\n"
        "   membar #StoreLoad | #StoreStore\n"
        "   bne,pn %%icc, 1b\n"
        "   mov 1, %0\n"
        "2:"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");

        return tmp1;
}
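
/* Reader release: the leading membar keeps the critical section's loads
 * and stores ahead of the reads in the cas loop that decrements the
 * reader count.
 */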
static void inline __read_unlock(raw_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
        "   membar #StoreLoad | #LoadLoad\n"
        "1: lduw [%2], %0\n"
        "   sub %0, 1, %1\n"
        "   cas [%2], %0, %1\n"
        "   cmp %0, %1\n"
        "   bne,pn %%xcc, 1b\n"
        "   nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
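
/* Writer acquire: spin out of line until the whole word reads zero
 * (no readers, no writer), then cas in the 0x80000000 writer bit.
 * If the cas loses a race, start over.
 */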
static void inline __write_lock(raw_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        __asm__ __volatile__(
        "1: lduw [%2], %0\n"
        "   brnz,pn %0, 2f\n"
        "4: or %0, %3, %1\n"
        "   cas [%2], %0, %1\n"
        "   cmp %0, %1\n"
        "   membar #StoreLoad | #StoreStore\n"
        "   bne,pn %%icc, 1b\n"
        "   nop\n"
        "   .subsection 2\n"
        "2: lduw [%2], %0\n"
        "   membar #LoadLoad\n"
        "   brnz,pt %0, 2b\n"
        "   nop\n"
        "   ba,a,pt %%xcc, 4b\n"
        "   .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}
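
/* Writer release: order the critical section before the store, then
 * clear the whole word (writer bit and all).
 */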
static void inline __write_unlock(raw_rwlock_t *lock)
{
        __asm__ __volatile__(
        "   membar #LoadStore | #StoreStore\n"
        "   stw %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}
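
/* Writer trylock: give up with 0 as soon as the word is seen non-zero;
 * otherwise cas in the writer bit, retrying only on cas failure, and
 * return 1 on success.
 */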
static int inline __write_trylock(raw_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        __asm__ __volatile__(
        "   mov 0, %2\n"
        "1: lduw [%3], %0\n"
        "   brnz,pn %0, 2f\n"
        "   or %0, %4, %1\n"
        "   cas [%3], %0, %1\n"
        "   cmp %0, %1\n"
        "   membar #StoreLoad | #StoreStore\n"
        "   bne,pn %%icc, 1b\n"
        "   nop\n"
        "   mov 1, %2\n"
        "2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}

#define __raw_read_lock(p)      __read_lock(p)
#define __raw_read_trylock(p)   __read_trylock(p)
#define __raw_read_unlock(p)    __read_unlock(p)
#define __raw_write_lock(p)     __write_lock(p)
#define __raw_write_unlock(p)   __write_unlock(p)
#define __raw_write_trylock(p)  __write_trylock(p)

#define __raw_read_can_lock(rw)         (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)        (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */