spinlock_32.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * 32-bit SMP spinlocks.
 */
#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/system.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag.
 */
#define TICKET_QUANTUM 2
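
/*
 * A rough sketch (illustrative only) of how the even-ticket scheme
 * plays out in the out-of-line arch_spin_lock() declared below: tns
 * atomically stores 1 and returns the old value, so an odd value
 * always means another CPU's tns is in flight.
 *
 *	int ticket;
 *	while ((ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
 *		;				// odd: tns in flight, retry
 *	lock->next_ticket = ticket + TICKET_QUANTUM;	// store back, still even
 *	while (lock->current_ticket != ticket)
 *		;				// spin until our turn is served
 */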

/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Note that even if a new ticket is in the process of being
	 * acquired, so lock->next_ticket is 1, it's still reasonable
	 * to claim the lock is held, since it will be momentarily
	 * if not already.  There's no need to wait for a "valid"
	 * lock->next_ticket to become available.
	 */
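	/*
	 * Example: current_ticket == next_ticket == 4 reads as unlocked;
	 * a mid-tns next_ticket of 1 reads as locked, per the above.
	 */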
	return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* For efficiency, overlap fetching the old ticket with the wmb(). */
	int old_ticket = lock->current_ticket;
	wmb(); /* guarantee anything modified under the lock is visible */
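	/* Only the lock holder stores current_ticket, so a plain store suffices. */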
	lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT 8
#define _WR_CURR_SHIFT 16
#define _WR_WIDTH 8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8
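
/*
 * The word layout those shifts imply, sketched for reference:
 *
 *	 31        24 23        16 15         8 7          0
 *	+------------+------------+------------+------------+
 *	| rd count   | wr current | wr next    | tns byte   |
 *	+------------+------------+------------+------------+
 *
 * Bit 0 is the '1' a tns leaves behind while a CPU holds the word.
 */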

/* Internal functions; do not use. */
void arch_read_lock_slow(arch_rwlock_t *, u32);
int arch_read_trylock_slow(arch_rwlock_t *);
void arch_read_unlock_slow(arch_rwlock_t *);
void arch_write_lock_slow(arch_rwlock_t *, u32);
void arch_write_unlock_slow(arch_rwlock_t *, u32);

/**
 * arch_read_can_lock() - would read_trylock() succeed?
 */
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
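	/*
	 * Shifting off the reader-count byte leaves only the writer
	 * tickets and the tns bit; readers may proceed iff those are zero.
	 */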
	return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

/**
 * arch_write_can_lock() - would write_trylock() succeed?
 */
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

/**
 * arch_read_lock() - acquire a read lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rwlock)
{
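	/*
	 * tns leaves a 1 in the low bit while we hold the word; any set
	 * bit below the reader count (a writer, or another tns) sends us
	 * to the slow path.  Otherwise store back with one more reader,
	 * which also clears the tns bit.
	 */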
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val << _RD_COUNT_WIDTH)) {
		arch_read_lock_slow(rwlock, val);
		return;
	}
	rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_lock() - acquire a write lock.
 */
static inline void arch_write_lock(arch_rwlock_t *rwlock)
{
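	/*
	 * Only an all-zero word (no readers, no writers, no tns in
	 * flight) can be write-locked on the fast path.
	 */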
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val != 0)) {
		arch_write_lock_slow(rwlock, val);
		return;
	}
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
}

/**
 * arch_read_trylock() - try to acquire a read lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
	int locked;
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val & 1))
		return arch_read_trylock_slow(rwlock);
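	/*
	 * Store back either one more reader (acquired) or the word we
	 * read, unmodified, releasing our tns claim.
	 */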
	locked = (val << _RD_COUNT_WIDTH) == 0;
	rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
	return locked;
}

/**
 * arch_write_trylock() - try to acquire a write lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);
	/*
	 * If a tns is in progress, or there's a waiting or active locker,
	 * or active readers, we can't take the lock, so give up.
	 */
	if (unlikely(val != 0)) {
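		/*
		 * An odd val means another tns already held the word;
		 * otherwise our tns claimed it and we must restore what
		 * we read.
		 */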
		if (!(val & 1))
			rwlock->lock = val;
		return 0;
	}
	/* Set the "next" field to mark it locked. */
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
	return 1;
}

/**
 * arch_read_unlock() - release a read lock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rwlock)
{
	u32 val;
	mb(); /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
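	/* An odd value means we raced with another tns; finish out of line. */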
	if (unlikely(val & 1)) {
		arch_read_unlock_slow(rwlock);
		return;
	}
	rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_unlock() - release a write lock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rwlock)
{
	u32 val;
	mb(); /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
		arch_write_unlock_slow(rwlock, val);
		return;
	}
	rwlock->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
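
/*
 * These arch_* primitives are not called directly; the generic kernel
 * wrappers expand to them.  A typical caller, sketched with a
 * hypothetical lock:
 *
 *	DEFINE_SPINLOCK(my_lock);	// my_lock is illustrative
 *
 *	spin_lock(&my_lock);
 *	// ... critical section ...
 *	spin_unlock(&my_lock);
 */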

#endif /* _ASM_TILE_SPINLOCK_32_H */