spinlock.h

#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
        (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
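
/*
 * A worked example may help; the values below are illustrative only and
 * assume byte-sized tickets (NR_CPUS < 256):
 *
 *   initial state            head = 0, tail = 0   lock is free
 *   CPU A: xadd tail += 1    sees {head=0, tail=0}, lock becomes {0, 1};
 *                            head matches A's ticket (0), so A owns the lock
 *   CPU B: xadd tail += 1    sees {head=0, tail=1}, lock becomes {0, 2};
 *                            B spins until head reaches its ticket (1)
 *   CPU A: unlock            head becomes 1, so B acquires the lock next
 */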
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = 1 };

        inc = xadd(&lock->tickets, inc);

        for (;;) {
                if (inc.head == inc.tail)
                        break;
                cpu_relax();
                inc.head = ACCESS_ONCE(lock->tickets.head);
        }
        barrier();  /* make sure nothing creeps before the lock is taken */
}
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.tickets = ACCESS_ONCE(lock->tickets);
        if (old.tickets.head != old.tickets.tail)
                return 0;

        new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
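
/*
 * Sketch of the head_tail arithmetic above, assuming byte-sized tickets
 * (so TICKET_SHIFT == 8; see asm/spinlock_types.h for the real values):
 *
 *   lock->head_tail == 0x0303   head == tail == 3, lock is free
 *   new.head_tail   == 0x0403   tail bumped to 4, head still 3
 *
 * The cmpxchg installs the new value only if head_tail is still 0x0303,
 * i.e. no other CPU took or queued for the lock in the meantime.
 */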
#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
                     : "+m" (lock->head_tail)
                     :
                     : "memory", "cc");
}
#else
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
                     : "+m" (lock->head_tail)
                     :
                     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

        return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

        return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                 unsigned long flags)
{
        arch_spin_lock(lock);
}
#endif  /* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                cpu_relax();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
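
/*
 * Rough sketch of the counter scheme (RW_LOCK_BIAS and the WRITE_LOCK_* /
 * READ_LOCK_* helpers are defined in asm/rwlock.h; the classic bias value of
 * 0x00100000 is assumed below purely for illustration):
 *
 *   counter == RW_LOCK_BIAS       unlocked
 *   counter == RW_LOCK_BIAS - N   N readers hold the lock
 *   counter == 0                  one writer holds the lock
 *
 * A reader decrements the counter by one and fails (undoing the decrement,
 * or spinning in __read_lock_failed) if the result is negative, i.e. a
 * writer is involved. A writer subtracts the whole bias and fails unless
 * the result is exactly zero, i.e. no readers and no other writer.
 */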
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
        return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
        return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
                     "jns 1f\n"
                     "call __read_lock_failed\n\t"
                     "1:\n"
                     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
                     "jz 1f\n"
                     "call __write_lock_failed\n\t"
                     "1:\n"
                     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
                     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

        if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
                return 1;
        READ_LOCK_ATOMIC(inc)(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)&lock->write;

        if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
                return 1;
        atomic_add(WRITE_LOCK_CMP, count);
        return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
                     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
                     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */