spinlock_64.h

/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
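
/* Illustrative sketch (not part of the original header): the ldstub loop
 * above behaves roughly like the C below.  test_and_set_byte() is a
 * hypothetical helper standing in for the atomic ldstub, which stores
 * 0xff into the lock byte and returns its previous value.
 *
 *	static inline void spin_lock_sketch(raw_spinlock_t *lock)
 *	{
 *		while (test_and_set_byte(&lock->lock)) {
 *			// contended: spin with plain loads only, as in the
 *			// ".subsection 2" spinner above, then retry the ldstub
 *			while (lock->lock)
 *				;
 *		}
 *	}
 */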

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
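
/* Usage sketch (illustrative, not part of the original header): the trylock
 * variant performs a single ldstub and never spins, so a caller that fails
 * to acquire the lock must back off on its own:
 *
 *	if (__raw_spin_trylock(lock)) {
 *		... critical section ...
 *		__raw_spin_unlock(lock);
 *	} else {
 *		... lock was busy, do something else ...
 *	}
 */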

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
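
/* Illustrative sketch (not part of the original header): while spinning,
 * the slow path above drops the processor interrupt level (%pil) back to
 * the level the caller saved in 'flags', then raises it again in the
 * branch delay slot before retrying the ldstub.  Roughly, using
 * hypothetical read_pil()/write_pil()/test_and_set_byte() helpers:
 *
 *	while (test_and_set_byte(&lock->lock)) {
 *		unsigned long pil = read_pil();	// rdpr %pil
 *		write_pil(flags);		// allow interrupts while waiting
 *		while (lock->lock)
 *			;			// spin with plain loads
 *		write_pil(pil);			// re-raise PIL, then retry
 *	}
 */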

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

static void inline __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
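
/* Illustrative sketch (not part of the original header): a reader takes the
 * lock by atomically incrementing the 32-bit counter with cas, but only
 * while the sign bit (the writer bit) is clear.  Roughly, with a
 * hypothetical cas32() helper:
 *
 *	int old, new;
 *	for (;;) {
 *		old = lock->lock;		// ldsw (sign-extending load)
 *		if (old < 0) {			// writer holds or wants the lock
 *			while (lock->lock < 0)	// spin in ".subsection 2"
 *				;
 *			continue;
 *		}
 *		new = old + 1;
 *		if (cas32(&lock->lock, old, new) == old)
 *			break;
 *	}
 */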

static int inline __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

static void inline __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
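
/* Illustrative sketch (not part of the original header): releasing a read
 * lock is an atomic decrement of the counter, retried until the cas
 * succeeds; the leading membar orders the critical section's accesses
 * before the decrement becomes visible.  Roughly, with a hypothetical
 * cas32() helper:
 *
 *	unsigned int old, new;
 *	do {
 *		old = lock->lock;		// lduw
 *		new = old - 1;
 *	} while (cas32(&lock->lock, old, new) != old);
 */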

static void inline __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
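
/* Illustrative sketch (not part of the original header): a writer acquires
 * the lock by installing the 0x80000000 mask with cas, but only when the
 * whole word is zero (no readers, no writer).  Roughly, with a
 * hypothetical cas32() helper:
 *
 *	for (;;) {
 *		unsigned int old = lock->lock;	// lduw
 *		if (old != 0) {			// readers or a writer present
 *			while (lock->lock != 0)	// spin in ".subsection 2"
 *				;
 *			continue;
 *		}
 *		if (cas32(&lock->lock, 0, 0x80000000U) == 0)
 *			break;
 *	}
 */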

static void inline __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static int inline __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
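
/* Usage sketch (illustrative, not part of the original header): like the
 * spinlock trylock, the rwlock trylock variants never enter the spinner
 * sections; if the lock is busy they return 0 rather than waiting.
 *
 *	if (__write_trylock(rw)) {
 *		... exclusive access ...
 *		__write_unlock(rw);
 *	}
 */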

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */