spinlock.h (4.8 KB)
  1. /*
  2. * include/asm-xtensa/spinlock.h
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2001 - 2005 Tensilica Inc.
  9. */
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

/*
 * spinlock
 *
 * There is at most one owner of a spinlock.  There are not different
 * types of spinlock owners like there are for rwlocks (see below).
 *
 * When trying to obtain a spinlock, the function "spins" forever, or busy-
 * waits, until the lock is obtained.  When spinning, presumably some other
 * owner will soon give up the spinlock making it available to others.  Use
 * the trylock functions to avoid spinning forever.
 *
 * possible values:
 *
 *    0        nobody owns the spinlock
 *    1        somebody owns the spinlock
 */

/* Lock is held whenever ->slock is non-zero (see value table above). */
#define __raw_spin_is_locked(x) ((x)->slock != 0)

/* Busy-wait (without trying to take the lock) until the holder drops it. */
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/* Saved IRQ flags are not needed to acquire on this arch; plain lock works. */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
/*
 * Acquire @lock, busy-waiting until it is free.
 *
 * Uses the Xtensa S32C1I conditional store: the SCOMPARE1 special register
 * holds the expected old value (0 == unlocked); S32C1I writes the new value
 * (1) only if memory still equals SCOMPARE1, and returns the old memory
 * value in the source register either way.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			" movi %0, 0\n"		/* expect: 0 (unlocked) */
			" wsr %0, scompare1\n"	/* load comparand into SCOMPARE1 */
			"1: movi %0, 1\n"	/* new value: 1 (locked) */
			" s32c1i %0, %1, 0\n"	/* CAS; %0 <- previous *slock */
			" bnez %0, 1b\n"	/* previous != 0: was held, retry */
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}
/*
 * Returns 1 if the lock is obtained, 0 otherwise.
 *
 * Single S32C1I compare-and-swap attempt (no spinning): succeeds only if
 * *slock was 0, in which case the old value 0 comes back in tmp.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			" movi %0, 0\n"		/* expect: 0 (unlocked) */
			" wsr %0, scompare1\n"
			" movi %0, 1\n"		/* new value: 1 (locked) */
			" s32c1i %0, %1, 0\n"	/* %0 <- previous *slock */
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	return tmp == 0 ? 1 : 0;	/* previous 0 means we won the lock */
}
/*
 * Release @lock by storing 0.
 *
 * S32RI is the Xtensa store-with-release instruction, so prior memory
 * accesses in the critical section are ordered before the unlocking store.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			" movi %0, 0\n"
			" s32ri %0, %1, 0\n"	/* release-store 0 -> unlocked */
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}
/*
 * rwlock
 *
 * Read-write locks are really a more flexible spinlock.  They allow
 * multiple readers but only one writer.  Write ownership is exclusive
 * (i.e., all other readers and writers are blocked from ownership while
 * there is a write owner).  These rwlocks are unfair to writers.  Writers
 * can be starved for an indefinite time by readers.
 *
 * possible values:
 *
 *    0          nobody owns the rwlock
 *   >0          one or more readers own the rwlock
 *               (the positive value is the actual number of readers)
 *   0x80000000  one writer owns the rwlock, no other writers, no readers
 */

/* A writer can only take the lock when nobody at all holds it. */
#define __raw_write_can_lock(x) ((x)->lock == 0)
/*
 * Acquire @rw for writing, busy-waiting until no readers or writer hold it.
 *
 * CAS loop: expect 0 (free), store the writer bit 0x80000000
 * (built as 1 << 31 via movi + slli).
 */
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			" movi %0, 0\n"		/* expect: 0 (completely free) */
			" wsr %0, scompare1\n"
			"1: movi %0, 1\n"
			" slli %0, %0, 31\n"	/* new value: 0x80000000 (writer) */
			" s32c1i %0, %1, 0\n"	/* %0 <- previous *lock */
			" bnez %0, 1b\n"	/* previous != 0: readers/writer, retry */
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}
/*
 * Returns 1 if the lock is obtained, 0 otherwise.
 *
 * One-shot version of __raw_write_lock(): a single CAS of 0 ->
 * 0x80000000; succeeds only if the lock was completely free.
 */
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			" movi %0, 0\n"		/* expect: 0 (completely free) */
			" wsr %0, scompare1\n"
			" movi %0, 1\n"
			" slli %0, %0, 31\n"	/* new value: 0x80000000 (writer) */
			" s32c1i %0, %1, 0\n"	/* %0 <- previous *lock */
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return tmp == 0 ? 1 : 0;	/* previous 0 means we got write ownership */
}
/*
 * Drop write ownership of @rw.
 *
 * The writer holds the lock exclusively (value 0x80000000), so a plain
 * release store of 0 is enough — no CAS needed.  S32RI orders the
 * critical section's accesses before the store.
 */
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			" movi %0, 0\n"
			" s32ri %0, %1, 0\n"	/* release-store 0 -> free */
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}
/*
 * Acquire @rw for reading, busy-waiting while a writer holds it.
 *
 * Loop: reload while the value is negative (writer bit 0x80000000 set),
 * then CAS old -> old + 1 to add ourselves to the reader count; retry
 * from the top if the CAS observed a different value.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	__asm__ __volatile__(
			"1: l32i %1, %2, 0\n"	/* tmp <- current lock value */
			" bltz %1, 1b\n"	/* negative: writer active, spin */
			" wsr %1, scompare1\n"	/* expect the value we just read */
			" addi %0, %1, 1\n"	/* new value: one more reader */
			" s32c1i %0, %2, 0\n"	/* %0 <- previous *lock */
			" bne %0, %1, 1b\n"	/* raced with someone: retry */
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}
/*
 * Returns 1 if the lock is obtained, 0 otherwise.
 *
 * Single attempt to join the reader count.  If old + 1 is negative the
 * writer bit is set, so bail out with a non-zero result.  Otherwise CAS
 * old -> old + 1; afterwards result = (previous value) - (expected old),
 * which is 0 exactly when the CAS succeeded.
 */
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	__asm__ __volatile__(
			" l32i %1, %2, 0\n"	/* tmp <- current lock value */
			" addi %0, %1, 1\n"	/* result = tmp + 1 */
			" bltz %0, 1f\n"	/* negative: writer holds, fail */
			" wsr %1, scompare1\n"	/* expect the value we just read */
			" s32c1i %0, %2, 0\n"	/* %0 <- previous *lock */
			" sub %0, %0, %1\n"	/* 0 iff CAS matched tmp */
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}
/*
 * Drop one reader reference on @rw.
 *
 * Other readers may be decrementing/incrementing concurrently, so the
 * decrement must be a CAS loop: expect the value just loaded, store
 * value - 1, retry if another CPU changed it in between.
 */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
			"1: l32i %1, %2, 0\n"	/* tmp2 <- current lock value */
			" addi %0, %1, -1\n"	/* tmp1 = one fewer reader */
			" wsr %1, scompare1\n"	/* expect the value we just read */
			" s32c1i %0, %2, 0\n"	/* %0 <- previous *lock */
			" bne %0, %1, 1b\n"	/* raced with someone: retry */
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}

#endif	/* _XTENSA_SPINLOCK_H */