/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, }
#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(lp)	((lp)->lock != 0)

#define spin_unlock_wait(lp)	\
do {	membar("#LoadLoad");	\
} while ((lp)->lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
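
/*
 * Editor's illustrative sketch (not part of the original header): roughly
 * what the ldstub-based acquire loop above does, expressed with GCC's
 * __atomic builtins.  ldstub atomically loads the lock byte and stores
 * 0xff into it, so a non-zero old value means someone else holds the lock
 * and we spin on plain loads (the .subsection 2 code) until it reads zero,
 * then retry the atomic grab.  The function name is hypothetical and this
 * block is never compiled.
 */
#if 0
static inline void example_spin_lock(volatile unsigned char *lock)
{
	while (__atomic_test_and_set((void *)lock, __ATOMIC_ACQUIRE)) {
		/* Busy: spin with cheap loads until the owner stores zero. */
		while (__atomic_load_n(lock, __ATOMIC_RELAXED))
			;
	}
}
#endif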

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
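
/*
 * Editor's illustrative sketch (not from the original source): the logic of
 * _raw_spin_lock_flags() above in plain C.  While the lock is busy, the
 * interrupt level the caller saved in 'flags' is written back to %pil so
 * interrupts can be serviced during the spin, and the elevated level is
 * restored just before retrying the atomic grab.  read_pil()/write_pil()
 * are hypothetical stand-ins for the rdpr/wrpr %pil instructions.
 */
#if 0
static inline void example_spin_lock_flags(volatile unsigned char *lock,
					   unsigned long flags)
{
	while (__atomic_test_and_set((void *)lock, __ATOMIC_ACQUIRE)) {
		unsigned long pil = read_pil();	/* rdpr %pil (hypothetical) */

		write_pil(flags);		/* drop to the caller's level */
		while (__atomic_load_n(lock, __ATOMIC_RELAXED))
			;			/* spin, interrupts allowed */
		write_pil(pil);			/* raise again, then retry */
	}
}
#endif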

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)

#define spin_unlock_wait(__lock)	\
do {					\
	membar("#LoadLoad");		\
} while ((__lock)->lock)

extern void _do_spin_lock(spinlock_t *lock, char *str);
extern void _do_spin_unlock(spinlock_t *lock);
extern int _do_spin_trylock(spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, }
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
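
/*
 * Editor's illustrative sketch (not part of the original header): the
 * compare-and-swap loop of __read_lock() above in plain C.  The lock word
 * is read as a signed value: negative means a writer owns bit 31, so the
 * reader waits; otherwise it bumps the reader count with cas and retries
 * if the cas raced.  The function name is hypothetical.
 */
#if 0
static inline void example_read_lock(volatile int *lock)
{
	int old, newval;

	do {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		while (old < 0)		/* a writer holds the lock */
			old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		newval = old + 1;	/* one more reader */
	} while (!__atomic_compare_exchange_n(lock, &old, newval,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}
#endif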

static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
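
/*
 * Editor's illustrative sketch (not part of the original header): the logic
 * of __write_lock() above.  A writer may only proceed when the whole lock
 * word is zero (no readers, no writer); it then sets bit 31 with cas,
 * spinning on plain loads while the word drains.  EXAMPLE_WRITER_BIT and
 * the function name are hypothetical.
 */
#if 0
#define EXAMPLE_WRITER_BIT	0x80000000U

static inline void example_write_lock(volatile unsigned int *lock)
{
	unsigned int old = 0;

	while (!__atomic_compare_exchange_n(lock, &old, EXAMPLE_WRITER_BIT,
					    0, __ATOMIC_ACQUIRE,
					    __ATOMIC_RELAXED)) {
		/* Readers or another writer present: wait for zero. */
		while (__atomic_load_n(lock, __ATOMIC_RELAXED))
			;
		old = 0;
	}
}
#endif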

static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
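
/*
 * Editor's illustrative sketch (not part of the original header): the
 * behaviour of __write_trylock() above.  It fails immediately (returns 0)
 * if any reader or writer is already present, retries only if the cas
 * itself raced, and returns 1 once bit 31 has been set.  The function name
 * is hypothetical.
 */
#if 0
static inline int example_write_trylock(volatile unsigned int *lock)
{
	unsigned int old;

	do {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		if (old != 0)
			return 0;	/* already held by readers or a writer */
	} while (!__atomic_compare_exchange_n(lock, &old, 0x80000000U,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return 1;
}
#endif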

#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);

#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock"); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)

/* Bit 31 of the rwlock word is the writer bit; the low bits count readers. */
#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */