spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the lock value.  If it is zero (unlocked), we
 * try to exclusively store 1 to take the lock; if the lock was held
 * or the exclusive store fails, we retry.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
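
/*
 * Illustrative usage sketch (not part of this header): a spinlock_t can
 * be initialised either statically with SPIN_LOCK_UNLOCKED or at run
 * time with spin_lock_init().  The names below are hypothetical.
 */
#if 0	/* example only */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void example_setup(void)
{
	spin_lock_init(&example_lock);	/* same effect as the static initialiser */
}
#endif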
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");

	return tmp == 0;
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc", "memory");
}
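
/*
 * Illustrative only: the ldrex/strex loop above implements a
 * compare-and-swap from 0 (unlocked) to 1 (locked).  The sketch below
 * expresses the same idea with GCC __atomic builtins; it is not part
 * of the kernel API and the names are hypothetical.
 */
#if 0	/* example only */
static inline void example_spin_lock(unsigned int *lock)
{
	unsigned int expected = 0;

	/* Retry until we observe 0 and atomically replace it with 1. */
	while (!__atomic_compare_exchange_n(lock, &expected, 1, 0,
					    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		expected = 0;	/* value observed on failure is discarded */
}

static inline void example_spin_unlock(unsigned int *lock)
{
	/* A plain store of 0 releases the lock, as in _raw_spin_unlock(). */
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}
#endif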
/*
 * RWLOCKS
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");
}

static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");

	return tmp == 0;
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc", "memory");
}
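
/*
 * Illustrative only: the write-lock fast path above is a
 * compare-and-swap from 0 (no readers, no writer) to 0x80000000
 * (bit 31 set = writer holds the lock).  A rough sketch with GCC
 * __atomic builtins; not part of the kernel API, names hypothetical.
 */
#if 0	/* example only */
static inline int example_write_trylock(unsigned int *lock)
{
	unsigned int expected = 0;

	/* Succeeds only if no reader or writer currently holds the lock. */
	return __atomic_compare_exchange_n(lock, &expected, 0x80000000u, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
#endif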
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this
 *    location.  If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}
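
/*
 * Illustrative only: the read-lock path above increments the reader
 * count and backs off while bit 31 is set (value negative => writer
 * held).  A rough sketch with GCC __atomic builtins; not part of the
 * kernel API, names hypothetical.
 */
#if 0	/* example only */
static inline void example_read_lock(unsigned int *lock)
{
	unsigned int old;

	do {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
	} while ((int)old < 0 ||	/* writer present: keep spinning */
		 !__atomic_compare_exchange_n(lock, &old, old + 1, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

static inline void example_read_unlock(unsigned int *lock)
{
	/* Drop our reader reference; other readers may remain. */
	__atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);
}
#endif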
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */