spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock_wait(x)     do { barrier(); } while (spin_is_locked(x))

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

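/*
 * Descriptive note (added commentary, not in the original source): the
 * acquisition loop below works as the comment at the top of this file
 * describes.  ldrex loads the lock word and takes the exclusive monitor;
 * if the word was 0 (unlocked), strexeq attempts the exclusive store of 1
 * and leaves 0 in %0 on success.  If the lock was held, or the exclusive
 * store failed because another CPU touched the word, %0 is non-zero and
 * bne retries.  The smp_mb() afterwards stops critical-section accesses
 * from being reordered before the lock is visibly taken.
 */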
static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teqeq   %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        smp_mb();
}

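/*
 * Descriptive note (added commentary): single-shot attempt, i.e. the same
 * ldrex/strexeq sequence as above but with no retry loop.  %0 ends up 0
 * only when the lock was free and the exclusive store succeeded, so we
 * return 1 (acquired) in that case and 0 otherwise.  The barrier is only
 * needed on success, when we actually enter the critical section.
 */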
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"       ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

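/*
 * Descriptive note (added commentary): on release, the smp_mb() makes
 * sure all accesses inside the critical section complete before the lock
 * word is cleared, as required on weakly ordered V6 memory (see the
 * comment at the top of this file).  Since we own the lock, a plain
 * store of 0 is sufficient.
 */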
static inline void _raw_spin_unlock(spinlock_t *lock)
{
        smp_mb();

        __asm__ __volatile__(
"       str     %1, [%0]"
        :
        : "r" (&lock->lock), "r" (0)
        : "cc");
}

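/*
 * Illustrative usage sketch (added commentary, not part of the original
 * header): callers do not use the _raw_* primitives directly; they go
 * through the generic spin_lock()/spin_unlock() wrappers from
 * <linux/spinlock.h>, roughly:
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      spin_lock(&my_lock);
 *      ... critical section ...
 *      spin_unlock(&my_lock);
 *
 * "my_lock" is a hypothetical example lock, not something defined here.
 */
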
/*
 * RWLOCKS
 */

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED        (rwlock_t) { 0 }
#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x)     (*((volatile unsigned int *)(x)) != 0)

/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

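/*
 * Descriptive note (added commentary): the write-lock bit is bit 31
 * (the 0x80000000 operand below), and ldrex/strexeq only succeeds while
 * the whole lock word is zero, i.e. no readers and no writer present.
 */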
static inline void _raw_write_lock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        smp_mb();
}

static inline int _raw_write_trylock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
        smp_mb();

        __asm__ __volatile__(
"       str     %1, [%0]"
        :
        : "r" (&rw->lock), "r" (0)
        : "cc");
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */

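/*
 * Descriptive note (added commentary) on the sequence below: ldrex loads
 * the count, adds increments it and sets the condition flags (negative
 * means a writer holds bit 31), strexpl performs the exclusive store only
 * if the result was non-negative, and rsbpls turns the strex status into
 * a negative value when the store failed, so bmi retries both when a
 * writer is present and when we lost the exclusive reservation.
 */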
static inline void _raw_read_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       adds    %0, %0, #1\n"
"       strexpl %1, %0, [%2]\n"
"       rsbpls  %0, %1, #0\n"
"       bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        smp_mb();
}

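/*
 * Descriptive note (added commentary): read unlock exclusively reloads
 * the count, decrements it, and retries until the exclusive store
 * succeeds (strex writes 0 to %1 on success).  The smp_mb() beforehand
 * orders the critical section before the count is dropped.
 */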
static inline void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        smp_mb();

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, #1\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */