/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	((lp)->lock != 0)

#define spin_unlock_wait(lp)	\
do {	membar("#LoadLoad");	\
} while((lp)->lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
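
/* A rough C rendering of the acquire loop above, for readers who do not
 * speak V9 assembly.  __ldstub() is a hypothetical helper standing in
 * for the ldstub instruction, which atomically sets the lock byte to
 * 0xff and returns its previous value:
 *
 *	while (__ldstub(&lock->lock) != 0) {
 *		while (lock->lock != 0)
 *			;
 *	}
 *
 * The out-of-line inner loop spins read-only so the lock line is not
 * written to until it actually looks free, and the membar after a
 * successful ldstub keeps the critical section's loads and stores from
 * drifting above the acquisition under RMO.
 */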

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
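
/* Trylock is a single ldstub attempt: the old value of the lock byte is
 * returned, so zero means we now own the lock and non-zero means some
 * other cpu already does.  Roughly, with the same hypothetical
 * __ldstub() as in the sketch above:
 *
 *	return __ldstub(&lock->lock) == 0;
 */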

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
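
/* Unlock is just a byte store of zero.  The membar in front of it
 * orders every load and store done inside the critical section before
 * the store that drops the lock, which is what gives the unlock its
 * release semantics under RMO.
 */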

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 membar		#LoadLoad\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
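
/* Same acquire loop as _raw_spin_lock(), except that while we are stuck
 * spinning we drop %pil to the interrupt level the caller saved in
 * 'flags', so interrupts can be serviced during the wait, and raise it
 * again just before retrying the ldstub.  A rough sketch, where
 * __ldstub(), __rdpr_pil() and __wrpr_pil() are hypothetical helpers
 * for the ldstub, rdpr %pil and wrpr %pil instructions:
 *
 *	while (__ldstub(&lock->lock) != 0) {
 *		saved_pil = __rdpr_pil();
 *		__wrpr_pil(flags);
 *		while (lock->lock != 0)
 *			;
 *		__wrpr_pil(saved_pil);
 *	}
 */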

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while((__lock)->lock)

extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _do_spin_trylock (spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
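
/* Reader side: the low 31 bits of the lock word count active readers,
 * and bit 31 is set while a writer holds the lock.  The loop above
 * re-reads the word while it is negative (writer present) and otherwise
 * tries to bump the reader count with cas.  Roughly, where cas32() is a
 * hypothetical helper that returns the value it found in memory:
 *
 *	for (;;) {
 *		old = lock->lock;
 *		if ((int) old < 0)
 *			continue;
 *		if (cas32(&lock->lock, old, old + 1) == old)
 *			break;
 *	}
 */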

static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
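
/* Writer side: the writer claims the lock by atomically setting bit 31,
 * which it can only do while the whole word is zero, i.e. no readers
 * and no other writer.  Roughly, with the same hypothetical cas32():
 *
 *	for (;;) {
 *		old = lock->lock;
 *		if (old != 0)
 *			continue;
 *		if (cas32(&lock->lock, 0, 0x80000000UL) == 0)
 *			break;
 *	}
 *
 * Note that a waiting writer does not advertise itself, it simply
 * spins until the readers drain.
 */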

static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
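
/* __write_trylock() is the same cas attempt without the out-of-line
 * spin: if a load ever sees a non-zero word it gives up straight away
 * and returns 0; otherwise it retries only across transient cas
 * failures and returns 1 once bit 31 has been set.
 */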

#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);

#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock"); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */
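
/* Helpers shared by both variants.  They assume the lock word layout
 * used by the non-debug implementation above: bit 31 set means a writer
 * holds the lock, the low bits count active readers.  So reading is
 * possible whenever no writer is in, and writing only when the word is
 * completely zero.
 */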

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)

#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */