/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/threads.h>  /* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#define __raw_spin_is_locked(lp)  ((lp)->lock != 0)

/* Wait (with read barriers) until the current holder releases the lock,
 * without trying to acquire it.
 */
#define __raw_spin_unlock_wait(lp) \
do {	rmb(); \
} while ((lp)->lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:  ldstub   [%1], %0\n"
"    membar   #StoreLoad | #StoreStore\n"
"    brnz,pn  %0, 2f\n"
"     nop\n"
"    .subsection 2\n"
"2:  ldub     [%1], %0\n"
"    membar   #LoadLoad\n"
"    brnz,pt  %0, 2b\n"
"     nop\n"
"    ba,a,pt  %%xcc, 1b\n"
"    .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}
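
/* Illustrative sketch (added; not part of the original header): the acquire
 * path above is roughly equivalent to the C below.  ldstub atomically loads
 * the lock byte and stores 0xff to it; a zero result means the lock was free
 * and now belongs to us.  test_and_set_byte() is a hypothetical stand-in for
 * ldstub, shown only to make the asm easier to follow.
 */
#if 0
static inline void __example_raw_spin_lock(raw_spinlock_t *lock)
{
        while (test_and_set_byte(&lock->lock) != 0) {   /* "1: ldstub" */
                while (lock->lock != 0)                 /* "2: ldub" spin in .subsection 2 */
                        ;                               /* read-only wait until it looks free */
        }
}
#endif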

/* Returns nonzero if the lock byte was clear and has now been taken,
 * zero if the lock was already held.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        unsigned long result;

        __asm__ __volatile__(
"    ldstub   [%1], %0\n"
"    membar   #StoreLoad | #StoreStore"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __asm__ __volatile__(
"    membar   #StoreStore | #LoadStore\n"
"    stb      %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}
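
/* Hedged usage sketch (added; not part of the original header): the trylock
 * above returns nonzero on success, so a caller can back off instead of
 * spinning.  'my_lock' is a hypothetical raw_spinlock_t used purely for
 * illustration.
 */
#if 0
        if (__raw_spin_trylock(&my_lock)) {
                /* ... critical section ... */
                __raw_spin_unlock(&my_lock);
        } else {
                /* lock busy: do other work and retry later */
        }
#endif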

static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:  ldstub   [%2], %0\n"
"    membar   #StoreLoad | #StoreStore\n"
"    brnz,pn  %0, 2f\n"
"     nop\n"
"    .subsection 2\n"
"2:  rdpr     %%pil, %1\n"
"    wrpr     %3, %%pil\n"
"3:  ldub     [%2], %0\n"
"    membar   #LoadLoad\n"
"    brnz,pt  %0, 3b\n"
"     nop\n"
"    ba,pt    %%xcc, 1b\n"
"     wrpr    %1, %%pil\n"
"    .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (flags)
        : "memory");
}
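
/* Descriptive note (added): while __raw_spin_lock_flags() spins on a
 * contended lock it writes the caller's saved %pil ('flags') back into the
 * processor, so interrupts that were enabled before the irqsave can still be
 * serviced during the wait; the disabled %pil is restored in the branch
 * delay slot before the ldstub is retried.  Rough sketch, with hypothetical
 * read_pil()/write_pil()/try_ldstub() helpers:
 */
#if 0
        unsigned long saved_pil;

        while (!try_ldstub(lock)) {             /* "1: ldstub" found the lock busy */
                saved_pil = read_pil();         /* "2: rdpr %pil" */
                write_pil(flags);               /* allow the pre-irqsave interrupt level */
                while (lock->lock != 0)         /* "3: ldub" read-only spin */
                        ;
                write_pil(saved_pil);           /* re-disable before retrying the ldstub */
        }
#endif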

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

static inline void __read_lock(raw_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:  ldsw     [%2], %0\n"
"    brlz,pn  %0, 2f\n"
"4:   add     %0, 1, %1\n"
"    cas      [%2], %0, %1\n"
"    cmp      %0, %1\n"
"    membar   #StoreLoad | #StoreStore\n"
"    bne,pn   %%icc, 1b\n"
"     nop\n"
"    .subsection 2\n"
"2:  ldsw     [%2], %0\n"
"    membar   #LoadLoad\n"
"    brlz,pt  %0, 2b\n"
"     nop\n"
"    ba,a,pt  %%xcc, 4b\n"
"    .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
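
/* Illustrative sketch (added; not part of the original header): the reader
 * acquire above bumps the 32-bit lock word with a cas loop and parks in a
 * read-only spin while the word is negative, i.e. while a writer holds bit
 * 31.  cas32() is a hypothetical wrapper for the 'cas' instruction
 * (compare-and-swap of a 32-bit word).
 */
#if 0
        for (;;) {
                int old = (int) lock->lock;             /* "1: ldsw" */
                if (old < 0) {                          /* writer active */
                        while ((int) lock->lock < 0)
                                ;                       /* spin in .subsection 2 */
                        continue;
                }
                if (cas32(&lock->lock, old, old + 1) == old)
                        break;                          /* reader count incremented */
        }
#endif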

/* Returns 1 if a reader reference was taken, 0 if a writer currently
 * holds the lock (no spinning is done in that case).
 */
static inline int __read_trylock(raw_rwlock_t *lock)
{
        int tmp1, tmp2;

        __asm__ __volatile__(
"1:  ldsw     [%2], %0\n"
"    brlz,a,pn %0, 2f\n"
"     mov     0, %0\n"
"    add      %0, 1, %1\n"
"    cas      [%2], %0, %1\n"
"    cmp      %0, %1\n"
"    membar   #StoreLoad | #StoreStore\n"
"    bne,pn   %%icc, 1b\n"
"     mov     1, %0\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");

        return tmp1;
}

static inline void __read_unlock(raw_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"    membar   #StoreLoad | #LoadLoad\n"
"1:  lduw     [%2], %0\n"
"    sub      %0, 1, %1\n"
"    cas      [%2], %0, %1\n"
"    cmp      %0, %1\n"
"    bne,pn   %%xcc, 1b\n"
"     nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}

static inline void __write_lock(raw_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"1:  lduw     [%2], %0\n"
"    brnz,pn  %0, 2f\n"
"4:   or      %0, %3, %1\n"
"    cas      [%2], %0, %1\n"
"    cmp      %0, %1\n"
"    membar   #StoreLoad | #StoreStore\n"
"    bne,pn   %%icc, 1b\n"
"     nop\n"
"    .subsection 2\n"
"2:  lduw     [%2], %0\n"
"    membar   #LoadLoad\n"
"    brnz,pt  %0, 2b\n"
"     nop\n"
"    ba,a,pt  %%xcc, 4b\n"
"    .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}

static inline void __write_unlock(raw_rwlock_t *lock)
{
        __asm__ __volatile__(
"    membar   #LoadStore | #StoreStore\n"
"    stw      %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}

/* Returns 1 if the writer bit was set on a previously unlocked word,
 * 0 if the lock was held by readers or another writer.
 */
static inline int __write_trylock(raw_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"    mov      0, %2\n"
"1:  lduw     [%3], %0\n"
"    brnz,pn  %0, 2f\n"
"     or      %0, %4, %1\n"
"    cas      [%3], %0, %1\n"
"    cmp      %0, %1\n"
"    membar   #StoreLoad | #StoreStore\n"
"    bne,pn   %%icc, 1b\n"
"     nop\n"
"    mov      1, %2\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}
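
/* Hedged usage sketch (added; not part of the original header): the 32-bit
 * rwlock word keeps the reader count in its low bits and the writer flag in
 * bit 31 (the 0x80000000UL mask above), which is also what the can_lock
 * macros below test.  'my_rwlock' is a hypothetical raw_rwlock_t used purely
 * for illustration.
 */
#if 0
        __read_lock(&my_rwlock);
        /* ... multiple readers may run here concurrently ... */
        __read_unlock(&my_rwlock);

        __write_lock(&my_rwlock);
        /* ... exclusive access here ... */
        __write_unlock(&my_rwlock);
#endif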

#define __raw_read_lock(p)       __read_lock(p)
#define __raw_read_trylock(p)    __read_trylock(p)
#define __raw_read_unlock(p)     __read_unlock(p)
#define __raw_write_lock(p)      __write_lock(p)
#define __raw_write_unlock(p)    __write_unlock(p)
#define __raw_write_trylock(p)   __write_trylock(p)

#define __raw_read_can_lock(rw)  (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw) (!(rw)->lock)

#define _raw_spin_relax(lock)    cpu_relax()
#define _raw_read_relax(lock)    cpu_relax()
#define _raw_write_relax(lock)   cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */