/* spinlock.h */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
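
/*
 * Lock word encoding: slock == 1 means unlocked, 0 means locked, and a
 * negative value means locked with other CPUs spinning on it - hence
 * the "<= 0" test below.
 */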
#define __raw_spin_is_locked(x) \
        (*(volatile signed int *)(&(x)->slock) <= 0)
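
/*
 * Lock sequence: LOCK-prefixed decrement of the lock word; if the
 * result goes negative ("js") the lock was already held, so fall into
 * the out-of-line section and spin with rep;nop (pause) until the word
 * turns positive again, then retry the decrement.
 */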
#define __raw_spin_lock_string \
        "\n1:\t" \
        LOCK_PREFIX " ; decl %0\n\t" \
        "js 2f\n" \
        LOCK_SECTION_START("") \
        "2:\t" \
        "rep;nop\n\t" \
        "cmpl $0,%0\n\t" \
        "jle 2b\n\t" \
        "jmp 1b\n" \
        LOCK_SECTION_END

#define __raw_spin_lock_string_up \
        "\n\tdecl %0"
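
/*
 * Unlock is a plain (non LOCK-prefixed) store of 1.  That is enough to
 * release the lock here because x86 does not reorder a store with the
 * loads and stores that precede it, so the critical section is globally
 * visible before the lock word is seen as free.
 */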
#define __raw_spin_unlock_string \
        "movl $1,%0" \
                :"=m" (lock->slock) : : "memory"

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
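
/*
 * Trylock: atomically exchange 0 (the "locked" value) into the lock
 * word.  A positive old value means the lock was free and is now ours;
 * any other old value means it was already held, so report failure.
 */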
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        int oldval;
        __asm__ __volatile__(
                "xchgl %0,%1"
                :"=q" (oldval), "=m" (lock->slock)
                :"0" (0) : "memory");
        return oldval > 0;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __asm__ __volatile__(
                __raw_spin_unlock_string
        );
}

#define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
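
/*
 * Illustrative sketch only, not part of the original header: how a
 * caller might drive the raw interface above.  Real code goes through
 * the spin_lock()/spin_unlock() wrappers in <linux/spinlock.h>; the
 * identifiers example_lock, example_counter and example_bump() are
 * hypothetical, and the initializer assumes the __RAW_SPIN_LOCK_UNLOCKED
 * definition from asm/spinlock_types.h.
 */
#if 0
static raw_spinlock_t example_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int example_counter;

static inline void example_bump(void)
{
        __raw_spin_lock(&example_lock);         /* spins until slock reaches 0 */
        example_counter++;                      /* the critical section */
        __raw_spin_unlock(&example_lock);       /* stores 1: free again */
}

static inline int example_try_bump(void)
{
        if (!__raw_spin_trylock(&example_lock))
                return 0;                       /* busy - caller backs off */
        example_counter++;
        __raw_spin_unlock(&example_lock);
        return 1;
}
#endif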

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
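
/*
 * Concretely: the lock word starts at RW_LOCK_BIAS (0x01000000, from
 * asm/rwlock.h).  Each reader subtracts 1, a writer subtracts the whole
 * bias, and the unlock paths add the same amount back.  So the word
 * equals RW_LOCK_BIAS when the lock is free, sits between 0 and the
 * bias while only readers hold it, and is zero or negative once a
 * writer is involved - which is what the tests below check.
 */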
#define __raw_read_can_lock(x)          ((int)(x)->lock > 0)
#define __raw_write_can_lock(x)         ((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        __build_read_lock(rw);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        __build_write_lock(rw);
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
        if (atomic_read(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
                                : "=m" (rw->lock) : : "memory");
}
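
/*
 * Illustrative sketch only, not part of the original header: the
 * reader/writer pattern the counter scheme supports.  Real code uses
 * the read_lock()/write_lock() wrappers from <linux/spinlock.h>; the
 * identifiers example_rwlock, example_table, example_lookup() and
 * example_update() are hypothetical, and the initializer assumes the
 * __RAW_RW_LOCK_UNLOCKED definition from asm/spinlock_types.h.
 */
#if 0
static raw_rwlock_t example_rwlock = __RAW_RW_LOCK_UNLOCKED;
static int example_table[16];

static inline int example_lookup(int i)
{
        int val;

        __raw_read_lock(&example_rwlock);       /* many CPUs may read at once */
        val = example_table[i];
        __raw_read_unlock(&example_rwlock);
        return val;
}

static inline void example_update(int i, int v)
{
        __raw_write_lock(&example_rwlock);      /* excludes readers and writers */
        example_table[i] = v;
        __raw_write_unlock(&example_rwlock);
}
#endif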

#endif /* __ASM_SPINLOCK_H */