spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the current lock value */
"	teq	%0, #0\n"		/* already held? */
"	strexeq	%0, %2, [%1]\n"		/* if free, try to store 1; %0 = 0 on success */
"	teqeq	%0, #0\n"		/* did the exclusive store succeed? */
"	bne	1b"			/* lock held or store failed: retry */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
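
/*
 * Illustration only, not part of the original header: a minimal usage
 * sketch of the raw spinlock primitives above.  The lock object, its
 * { 0 } initialiser (the documented "unlocked" value) and the example
 * function names are assumptions made for this sketch.
 */
#if 0
static raw_spinlock_t example_lock = { 0 };	/* 0 == unlocked */

static void example_critical_section(void)
{
	__raw_spin_lock(&example_lock);		/* spins on ldrex/strex until acquired */
	/* ... touch data shared between CPUs ... */
	__raw_spin_unlock(&example_lock);	/* smp_mb(), then store 0 */
}

static int example_try_critical_section(void)
{
	if (!__raw_spin_trylock(&example_lock))	/* one ldrex/strex attempt, no spinning */
		return 0;
	/* ... touch data shared between CPUs ... */
	__raw_spin_unlock(&example_lock);
	return 1;
}
#endif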

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"		/* wait until there are no readers or writer */
"	strexeq	%0, %2, [%1]\n"		/* claim the lock by setting bit 31 */
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}
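
/*
 * Illustration only, not part of the original header: writer-side usage
 * of the rwlock primitives above.  The lock object, its { 0 } initialiser
 * and the example function name are assumptions for this sketch; bit 31
 * (0x80000000) is the write-held marker described in the comment above.
 */
#if 0
static raw_rwlock_t example_rwlock = { 0 };	/* 0 == no readers, no writer */

static void example_writer(void)
{
	__raw_write_lock(&example_rwlock);	/* waits for the word to hit 0, then sets bit 31 */
	/* ... modify data that readers may also walk ... */
	__raw_write_unlock(&example_rwlock);	/* smp_mb(), then store 0 */
}
#endif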

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this
 *    location.  If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* exclusively load the lock value */
"	adds	%0, %0, #1\n"		/* bump the reader count; negative if a writer holds bit 31 */
"	strexpl	%1, %0, [%2]\n"		/* store the new count only if the result was non-negative */
"	rsbpls	%0, %1, #0\n"		/* turn a failed exclusive store into a negative result */
"	bmi	1b"			/* negative: writer held the lock or store failed, retry */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"		/* drop the reader count */
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"		/* retry if the exclusive store failed */
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
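
/*
 * Illustration only, not part of the original header: reader-side usage
 * of the rwlock primitives above.  The lock object, its { 0 } initialiser
 * and the example function name are assumptions for this sketch; any
 * positive lock value is a count of currently active readers.
 */
#if 0
static raw_rwlock_t example_rwlock2 = { 0 };

static void example_reader(void)
{
	__raw_read_lock(&example_rwlock2);	/* bumps the count; retries while a writer holds bit 31 */
	/* ... walk data a writer may modify ... */
	__raw_read_unlock(&example_rwlock2);	/* smp_mb(), then drops the count */
}
#endif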

#endif /* __ASM_SPINLOCK_H */