/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>      /* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
        volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0, }
#define spin_lock_init(lp)      do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)      ((lp)->lock != 0)

#define spin_unlock_wait(lp)    \
do {    rmb();                  \
} while((lp)->lock)
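
/* Illustrative usage (not part of this header): callers normally go
 * through the generic spin_lock()/spin_unlock() wrappers, which expand
 * to the _raw_ primitives defined below, e.g.:
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      spin_lock(&my_lock);
 *      ... critical section ...
 *      spin_unlock(&my_lock);
 */

/* Acquire: ldstub atomically reads the lock byte and stores 0xff into
 * it; a zero result means we took the lock.  On contention we spin in
 * the out-of-line subsection using plain loads, retrying the ldstub
 * only once the byte reads back as zero.
 */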
static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
        "1:     ldstub          [%1], %0\n"
        "       membar          #StoreLoad | #StoreStore\n"
        "       brnz,pn         %0, 2f\n"
        "        nop\n"
        "       .subsection     2\n"
        "2:     ldub            [%1], %0\n"
        "       membar          #LoadLoad\n"
        "       brnz,pt         %0, 2b\n"
        "        nop\n"
        "       ba,a,pt         %%xcc, 1b\n"
        "       .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}
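
/* A single ldstub attempt with no spinning; returns 1 if the lock was
 * acquired, 0 if it was already held.
 */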
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned long result;

        __asm__ __volatile__(
        "       ldstub          [%1], %0\n"
        "       membar          #StoreLoad | #StoreStore"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}
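
/* Release: the membar orders the critical section's loads and stores
 * before the store of zero that drops the lock.
 */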
static inline void _raw_spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__(
        "       membar          #StoreStore | #LoadStore\n"
        "       stb             %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}
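
/* As _raw_spin_lock(), but while spinning we drop %pil back to the
 * caller's pre-disable interrupt level (passed in 'flags') so that
 * interrupts can be serviced, raising it again before retrying the
 * ldstub.
 */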
static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
        "1:     ldstub          [%2], %0\n"
        "       membar          #StoreLoad | #StoreStore\n"
        "       brnz,pn         %0, 2f\n"
        "        nop\n"
        "       .subsection     2\n"
        "2:     rdpr            %%pil, %1\n"
        "       wrpr            %3, %%pil\n"
        "3:     ldub            [%2], %0\n"
        "       membar          #LoadLoad\n"
        "       brnz,pt         %0, 3b\n"
        "        nop\n"
        "       ba,pt           %%xcc, 1b\n"
        "        wrpr           %1, %%pil\n"
        "       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r"(lock), "r"(flags)
        : "memory");
}

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
        volatile unsigned char lock;
        unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)      do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)  ((__lock)->lock != 0)
#define spin_unlock_wait(__lock)        \
do { \
        rmb(); \
} while((__lock)->lock)

extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
extern void _do_spin_unlock(spinlock_t *lock);
extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);

#define _raw_spin_trylock(lp) \
        _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
#define _raw_spin_lock(lock) \
        _do_spin_lock(lock, "spin_lock", \
                      (unsigned long) __builtin_return_address(0))
#define _raw_spin_unlock(lock)  _do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks; these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED        (rwlock_t) { 0, }
#define rwlock_init(lp)         do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
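
/* Reader acquire: cas-increment the reader count.  A negative value
 * means the writer bit is set, so we spin out of line until the
 * writer is gone.
 */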
static inline void __read_lock(rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__ (
        "1:     ldsw            [%2], %0\n"
        "       brlz,pn         %0, 2f\n"
        "4:      add            %0, 1, %1\n"
        "       cas             [%2], %0, %1\n"
        "       cmp             %0, %1\n"
        "       membar          #StoreLoad | #StoreStore\n"
        "       bne,pn          %%icc, 1b\n"
        "        nop\n"
        "       .subsection     2\n"
        "2:     ldsw            [%2], %0\n"
        "       membar          #LoadLoad\n"
        "       brlz,pt         %0, 2b\n"
        "        nop\n"
        "       ba,a,pt         %%xcc, 4b\n"
        "       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
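
/* Reader release: cas-decrement the reader count; the leading membar
 * orders the critical section before the decrement.
 */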
static inline void __read_unlock(rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
        "       membar          #StoreLoad | #LoadLoad\n"
        "1:     lduw            [%2], %0\n"
        "       sub             %0, 1, %1\n"
        "       cas             [%2], %0, %1\n"
        "       cmp             %0, %1\n"
        "       bne,pn          %%xcc, 1b\n"
        "        nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
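
/* Writer acquire: wait for the lock word to reach zero (no readers,
 * no writer), then cas in the writer bit (bit 31).
 */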
static inline void __write_lock(rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        __asm__ __volatile__(
        "1:     lduw            [%2], %0\n"
        "       brnz,pn         %0, 2f\n"
        "4:      or             %0, %3, %1\n"
        "       cas             [%2], %0, %1\n"
        "       cmp             %0, %1\n"
        "       membar          #StoreLoad | #StoreStore\n"
        "       bne,pn          %%icc, 1b\n"
        "        nop\n"
        "       .subsection     2\n"
        "2:     lduw            [%2], %0\n"
        "       membar          #LoadLoad\n"
        "       brnz,pt         %0, 2b\n"
        "        nop\n"
        "       ba,a,pt         %%xcc, 4b\n"
        "       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}
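
/* Writer release: clear the whole lock word after a release membar. */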
static inline void __write_unlock(rwlock_t *lock)
{
        __asm__ __volatile__(
        "       membar          #LoadStore | #StoreStore\n"
        "       stw             %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}
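
/* One shot at the writer bit: returns 1 on success, 0 if the lock word
 * was busy.  The cas does retry internally if it races with another
 * update while the word was seen as zero.
 */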
static inline int __write_trylock(rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        __asm__ __volatile__(
        "       mov             0, %2\n"
        "1:     lduw            [%3], %0\n"
        "       brnz,pn         %0, 2f\n"
        "        or             %0, %4, %1\n"
        "       cas             [%3], %0, %1\n"
        "       cmp             %0, %1\n"
        "       membar          #StoreLoad | #StoreStore\n"
        "       bne,pn          %%icc, 1b\n"
        "        nop\n"
        "       mov             1, %2\n"
        "2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}

#define _raw_read_lock(p)       __read_lock(p)
#define _raw_read_unlock(p)     __read_unlock(p)
#define _raw_write_lock(p)      __write_lock(p)
#define _raw_write_unlock(p)    __write_unlock(p)
#define _raw_write_trylock(p)   __write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
        volatile unsigned long lock;
        unsigned int writer_pc, writer_cpu;
        unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED        (rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)         do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);

#define _raw_read_lock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_read_lock(lock, "read_lock", \
                      (unsigned long) __builtin_return_address(0)); \
        local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_read_unlock(lock, "read_unlock", \
                        (unsigned long) __builtin_return_address(0)); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_write_lock(lock, "write_lock", \
                       (unsigned long) __builtin_return_address(0)); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_write_unlock(lock, \
                         (unsigned long) __builtin_return_address(0)); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({      unsigned long flags; \
        int val; \
        local_irq_save(flags); \
        val = _do_write_trylock(lock, "write_trylock", \
                                (unsigned long) __builtin_return_address(0)); \
        local_irq_restore(flags); \
        val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
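
/* Bit 31 of ->lock is the writer bit: readers can proceed while it is
 * clear, whereas a writer needs the entire word to be zero.
 */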
#define read_can_lock(rw)       (!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)      (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */