spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>

extern int printk(const char * fmt, ...)
        __attribute__ ((format (printf, 1, 2)));

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;
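
/*
 * The lock field holds 1 while the lock is free; _raw_spin_lock() drives
 * it to zero (or below, under contention), so any value <= 0 means "held".
 */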

#define SPINLOCK_MAGIC  0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT     , SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT     /* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
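
/*
 * Illustrative use only ("my_lock" is a made-up name, not part of this
 * header): a statically allocated lock can be initialized with
 *
 *        static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * while a lock embedded in a runtime-allocated structure is set up with
 * spin_lock_init(&obj->lock).
 */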

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define spin_is_locked(x)       (*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
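
/*
 * spin_unlock_wait() just spins (with a compiler barrier on each pass)
 * until the current holder drops the lock; it never acquires the lock
 * itself.  The flags argument of _raw_spin_lock_flags() is simply ignored
 * here: this implementation spins with the IRQ state left exactly as the
 * caller set it.
 */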

#define spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "js 2f\n" \
        LOCK_SECTION_START("") \
        "2:\t" \
        "rep;nop\n\t" \
        "cmpb $0,%0\n\t" \
        "jle 2b\n\t" \
        "jmp 1b\n" \
        LOCK_SECTION_END
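
/*
 * Fast path: the locked "decb" drops the lock byte from 1 to 0 and the
 * sign flag stays clear, so execution falls straight through.  If the
 * result went negative the lock was already held and "js" branches to
 * the out-of-line code (LOCK_SECTION_START/END put it in a separate text
 * section), which spins with "rep;nop" (the PAUSE hint) on plain reads
 * until the byte becomes positive again, then jumps back to retry the
 * atomic decrement at label 1.
 */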

/*
 * This works. Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
        "movb $1,%0" \
                :"=m" (lock->lock) : : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(lock->magic != SPINLOCK_MAGIC);
        assert_spin_locked(lock);
#endif
        __asm__ __volatile__(
                spin_unlock_string
        );
}

#else

#define spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->lock) \
                :"0" (oldval) : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(lock->magic != SPINLOCK_MAGIC);
        assert_spin_locked(lock);
#endif
        __asm__ __volatile__(
                spin_unlock_string
        );
}

#endif
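
/*
 * Two unlock flavours: on ordinary x86, stores are not reordered with
 * earlier stores, so a plain "movb $1" is a sufficient release.  With
 * CONFIG_X86_OOSTORE, or on Pentium Pro SMP (errata 66 and 92), that
 * ordering cannot be relied on, so the lock byte is released with
 * "xchgb" instead, which is implicitly locked and fully serializing.
 */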

static inline int _raw_spin_trylock(spinlock_t *lock)
{
        char oldval;
        __asm__ __volatile__(
                "xchgb %b0,%1"
                :"=q" (oldval), "=m" (lock->lock)
                :"0" (0) : "memory");
        return oldval > 0;
}
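
/*
 * trylock atomically swaps 0 into the lock byte and succeeds only if the
 * previous value was positive, i.e. the lock was free.  Writing 0 over an
 * already-held lock is harmless: slow-path waiters keep spinning for as
 * long as the byte is <= 0.
 */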

static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        if (lock->magic != SPINLOCK_MAGIC) {
                printk("eip: %p\n", __builtin_return_address(0));
                BUG();
        }
#endif
        __asm__ __volatile__(
                spin_lock_string
                :"=m" (lock->lock) : : "memory");
}
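
/*
 * Note: callers normally do not use these _raw_* primitives directly;
 * in this kernel tree the generic spin_lock()/spin_unlock() wrappers in
 * <linux/spinlock.h> layer preemption handling on top of them.
 */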

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;
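
/*
 * Here lock is a count rather than a flag: it starts at RW_LOCK_BIAS
 * (defined in <asm/rwlock.h>, 0x01000000 on i386), each reader subtracts
 * 1, and a writer subtracts the whole bias.
 */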

#define RWLOCK_MAGIC    0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT       , RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT       /* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define read_can_lock(x) ((int)(x)->lock > 0)
#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
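
/*
 * read_can_lock(): a positive count means no writer currently holds (or
 * has claimed) the bias, so a reader could get in.  write_can_lock():
 * only an untouched count (== RW_LOCK_BIAS) means there are no readers
 * and no writer at all.
 */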

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
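
/*
 * Worked example of counter values:
 *
 *        RW_LOCK_BIAS       unlocked
 *        RW_LOCK_BIAS - n   held by n readers
 *        0                  held by one writer
 *        negative           contended: the sign bit is set when, say, a
 *                           writer subtracts the bias while readers hold
 *                           the lock, or a reader decrements while a
 *                           writer holds it
 */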

static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
        __build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
        __build_write_lock(rw, "__write_lock_failed");
}
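
/*
 * __build_read_lock()/__build_write_lock() come from <asm/rwlock.h>: they
 * emit the locked subtract on the counter and, if it goes negative, jump
 * to the named out-of-line slow paths (in arch/i386/kernel/semaphore.c,
 * as noted above), which undo the failed subtraction and spin before
 * retrying.
 */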

#define _raw_read_unlock(rw)    asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)   asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
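
/*
 * Unlock is the reverse bookkeeping: a reader atomically adds its 1 back,
 * a writer adds back the whole RW_LOCK_BIAS.
 */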

static inline int _raw_read_trylock(rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
        if (atomic_read(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}
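
/*
 * Read trylock optimistically claims a reader slot; if the count went
 * negative a writer is involved, so it puts the slot back and reports
 * failure instead of spinning.
 */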

static inline int _raw_write_trylock(rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}
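
/*
 * Write trylock succeeds only if subtracting the whole bias leaves the
 * count at exactly zero, i.e. there were no readers and no other writer;
 * otherwise it restores the bias and returns 0.
 */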

#endif /* __ASM_SPINLOCK_H */