spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

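/*
 * The lwarx/stwcx. pair above is the usual PowerPC load-reserve/
 * store-conditional retry loop: the conditional store succeeds only if
 * no other CPU has touched the lock word since the reservation was
 * taken.  The trailing isync is the acquire barrier, preventing
 * accesses in the critical section from being performed before the
 * lock is actually held.
 */
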
static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

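/*
 * __spin_yield() and __rw_yield() live in arch/powerpc/lib/locks.c.
 * As a rough sketch (not the exact implementation): they pull the
 * holder's CPU number out of the low bits of the lock word, and if that
 * virtual processor is currently preempted by the hypervisor, donate
 * the rest of this timeslice to it (via H_CONFER on SPLPAR, or the
 * iSeries yield-processor HvCall).
 */
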
static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

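/*
 * HMT_low()/HMT_medium() adjust the SMT hardware thread priority: drop
 * to low priority while busy-waiting so the sibling thread gets more of
 * the core, and return to medium priority once the lock word looks free
 * and we retry the atomic acquire.
 */
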
static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}

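/*
 * LWSYNC_ON_SMP (from asm/synch.h) expands to lwsync on SMP builds and
 * to nothing on UP, ordering all stores in the critical section before
 * the plain store that clears the lock word.  It is the release half
 * that pairs with the isync acquire in __spin_trylock().
 */
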
#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

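/*
 * Minimal usage sketch (illustrative only -- callers normally go
 * through the generic <linux/spinlock.h> wrappers rather than these
 * raw ops, and "my_lock" below is a made-up example):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */
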
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

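/*
 * rwlock word encoding: a positive value is the number of active
 * readers, 0 means unlocked, and a held write lock stores a negative
 * token (the 0x800000yy LOCK_TOKEN on 64-bit, -1 on 32-bit).  Hence
 * __raw_read_can_lock() tests for >= 0 and __raw_write_can_lock()
 * tests for 0.
 */
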
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

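/*
 * In __read_trylock() above, the extsw (64-bit only) sign-extends the
 * 32-bit lock word so a write-held (negative) value stays negative,
 * addic. bumps the reader count, and the ble- bails out when the
 * result is <= 0, i.e. a writer holds the lock.  addic. updates the
 * carry bit, which is why "xer" appears in the clobber list.
 */
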
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

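/*
 * Note the asymmetry: __raw_read_unlock() needs an atomic lwarx/stwcx.
 * sequence because other readers may be updating the count at the same
 * time, while __raw_write_unlock() owns the word exclusively and can
 * simply store 0 after the same lwsync release barrier used by
 * __raw_spin_unlock().
 */
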
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */