spinlock_64.h

/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code.  They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants.  The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */

#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

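/* The byte at lock->lock is the whole lock: 0 means free, non-zero means
 * held.  __raw_spin_lock() acquires it with ldstub (atomic load-and-set-
 * to-0xff); on contention it spins in the out-of-line .subsection reading
 * the byte until it clears, then retries.  Roughly, as illustrative C
 * pseudocode (not what is compiled here; test_and_set_byte() is a
 * hypothetical helper standing in for ldstub):
 *
 *	while (test_and_set_byte(&lock->lock))
 *		while (lock->lock != 0)
 *			; // spin on plain loads
 */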
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

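/* Single ldstub attempt, no spinning: returns 1 if the lock was free and
 * is now held by the caller, 0 if it was already held.
 */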
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

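/* Release: the membar orders the critical section's loads and stores
 * ahead of the store of zero that frees the lock byte.
 */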
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

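/* Like __raw_spin_lock(), but while spinning the %pil is dropped back to
 * the caller's saved 'flags' value (presumably from local_irq_save()) so
 * that interrupts the caller disabled can still be serviced; the %pil is
 * raised again in the branch delay slot just before each retry of the
 * ldstub.
 */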
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

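/* The rwlock word holds the reader count in its low 31 bits; bit 31
 * (0x80000000) is the writer bit, which makes the word negative when a
 * writer holds the lock (see __raw_read_can_lock() below).  Readers take
 * the lock by cas-incrementing the count while the word is non-negative.
 */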
static void inline __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

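/* Reader trylock: retries the cas if it races with another reader, but
 * gives up immediately and returns 0 if a writer holds the lock (word
 * observed negative); returns 1 once a reader reference has been taken.
 */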
static int inline __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

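/* Drop a reader reference: a cas loop that decrements the count, with a
 * leading membar ordering the critical section's accesses ahead of the
 * releasing decrement.
 */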
static void inline __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

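/* Writer acquire: wait until the word is completely zero (no readers and
 * no writer), then cas in the 0x80000000 writer bit; as with the
 * spinlocks above, the waiting happens in the out-of-line .subsection.
 */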
static void inline __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

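/* Writer release: a membar to order the critical section, then a plain
 * store of zero clears the writer bit (the reader count is necessarily
 * zero while a writer holds the lock).
 */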
static void inline __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

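/* Writer trylock: returns 1 if the writer bit was installed, 0 if any
 * reader or writer currently holds the lock; a failed cas is retried
 * only while the word is still observed as zero.
 */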
static int inline __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */