spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/processor.h>
#include <asm/spinlock_types.h>
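
/*
 * The lock word is manipulated with the PA-RISC ldcw (load and clear
 * word) instruction: __ldcw() atomically reads the word and zeroes it,
 * returning the old value, and __ldcw_align() returns the suitably
 * aligned word inside arch_spinlock_t that ldcw actually operates on.
 * A value of 1 therefore means the lock is free and 0 means it is held,
 * which is why arch_spin_unlock() stores 1 and arch_spin_is_locked()
 * tests for 0.
 */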
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
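
/*
 * As a rough sketch of the protocol used below, the counter can be
 * read as:
 *
 *	counter == 0	unlocked (no readers, no writer)
 *	counter  > 0	held by that many readers
 *	counter == -1	held by a single writer
 */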

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
	return !rw->counter;
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */
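
/*
 * Illustrative sketch only: a hypothetical reader/writer pair built
 * directly on the primitives above.  Real kernel code reaches these
 * through the generic spinlock/rwlock wrappers rather than calling the
 * arch_* functions by hand; example_reader()/example_writer() and their
 * arguments are made up for illustration.
 */
static inline int example_reader(arch_rwlock_t *rw, const int *shared)
{
	int val;

	arch_read_lock(rw);	/* counter++; other readers may hold it too */
	val = *shared;		/* read the protected data */
	arch_read_unlock(rw);	/* counter-- */

	return val;
}

static inline void example_writer(arch_rwlock_t *rw, int *shared, int val)
{
	arch_write_lock(rw);	/* spins until counter == 0, then sets -1 */
	*shared = val;		/* exclusive access */
	arch_write_unlock(rw);	/* counter = 0 and drop the inner spinlock */
}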