spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#define __raw_spin_is_locked(x) \
		(*(volatile signed int *)(&(x)->slock) <= 0)
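/*
 * Lock protocol: slock is 1 when the lock is free.  __raw_spin_lock_string
 * atomically decrements it; a negative result means another CPU holds the
 * lock, so we spin in an out-of-line section ("rep;nop" is the pause hint)
 * until the value goes positive again, then retry the decrement.
 */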
#define __raw_spin_lock_string \
	"\n1:\t" \
	LOCK_PREFIX " ; decl %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpl $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
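/* Uniprocessor variant: no other CPU can race, so a plain decrement suffices. */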
#define __raw_spin_lock_string_up \
	"\n\tdecl %0"
#define __raw_spin_unlock_string \
	"movl $1,%0" \
		:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
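/*
 * Trylock: atomically exchange 0 into the lock word.  If the old value
 * was positive the lock was free and we now own it; otherwise it was
 * already held and the store of 0 leaves it held.
 */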
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	__asm__ __volatile__(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}
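/* Wait for the current holder to drop the lock without taking it ourselves. */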
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
 *
 * the helpers are in arch/i386/kernel/semaphore.c
 */
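/*
 * Concretely (a sketch of the counting scheme; the constants live in
 * asm/rwlock.h): the counter starts at RW_LOCK_BIAS.  Each reader
 * subtracts 1 and a writer subtracts the whole bias, so the count stays
 * positive while only readers hold the lock and reaches zero (or goes
 * negative under contention) once a writer is involved.  Hence readers
 * may lock while the count is positive, and a writer only while it is
 * exactly RW_LOCK_BIAS.
 *
 * The irq-mixing rule above, expressed with the generic API built on
 * these primitives (illustration only, not part of this header):
 *
 *	read_lock(&lock);			// e.g. in an interrupt handler
 *	...
 *	read_unlock(&lock);
 *
 *	write_lock_irqsave(&lock, flags);	// writer in process context
 *	...
 *	write_unlock_irqrestore(&lock, flags);
 */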
#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
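/*
 * The fast paths below are emitted by __build_read_lock()/__build_write_lock()
 * from asm/rwlock.h (included above); when the count goes the wrong way they
 * branch to the out-of-line __read_lock_failed/__write_lock_failed helpers
 * mentioned in the comment above.
 */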
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}
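/*
 * Read trylock: optimistically take a reader slot; if the count stayed
 * non-negative no writer holds the lock and we are done, otherwise put
 * the count back and report failure.
 */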
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
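/*
 * Write trylock: subtract the whole bias in one go.  The result is zero
 * only if no readers and no writer were present; otherwise restore the
 * bias and fail.
 */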
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
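/* Unlocking undoes the subtraction: a reader adds 1 back, a writer adds the bias back. */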
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
				: "=m" (rw->lock) : : "memory");
}
#endif /* __ASM_SPINLOCK_H */