/* spinlock.h */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

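/*
 * PA-RISC's only atomic read-modify-write primitive is LDCW (load and
 * clear word): it returns the old value of a word and atomically writes
 * zero to it.  The lock word is therefore 1 when free and 0 when held.
 * LDCW also requires a 16-byte-aligned operand, which __ldcw_align()
 * provides.
 */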
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)

/* Spin until the lock is observed free at least once, without taking it. */
#define __raw_spin_unlock_wait(x) \
                do { cpu_relax(); } while (__raw_spin_is_locked(x))

static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
                                         unsigned long flags)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        /* The outer loop takes the expensive atomic LDCW; the inner loop
         * spins on plain loads until the lock looks free again.  If the
         * caller originally had interrupts enabled (PSW_SM_I set in
         * flags), briefly re-enable them while busy-waiting so we don't
         * add to interrupt latency. */
        while (__ldcw(a) == 0)
                while (*a == 0)
                        if (flags & PSW_SM_I) {
                                local_irq_enable();
                                cpu_relax();
                                local_irq_disable();
                        } else
                                cpu_relax();
        mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        *a = 1;         /* a plain store of 1 releases the lock */
        mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        mb();
        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;   /* nonzero old value means we got it */
        mb();

        return ret;
}

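/*
 * Illustrative sketch only, not part of this header: the raw ops above
 * sit underneath the generic spinlock API, so a caller would typically
 * reach them like this ("my_lock" and "my_count" are hypothetical names):
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *      static int my_count;
 *
 *      unsigned long flags;
 *      spin_lock_irqsave(&my_lock, flags);  // eventually __raw_spin_lock_flags()
 *      my_count++;                          // critical section
 *      spin_unlock_irqrestore(&my_lock, flags);  // __raw_spin_unlock()
 */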
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
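
/*
 * Illustrative sketch only, not part of this header: the counter encodes
 * the lock state, and callers reach these ops through the generic rwlock
 * API ("my_rwlock" is a hypothetical name):
 *
 *      counter == 0    unlocked
 *      counter > 0     held by that many readers
 *      counter == -1   held by one writer
 *
 *      static DEFINE_RWLOCK(my_rwlock);
 *
 *      read_lock(&my_rwlock);          // -> __raw_read_lock()
 *      ... readers may run concurrently ...
 *      read_unlock(&my_rwlock);
 *
 *      write_lock(&my_rwlock);         // -> __raw_write_lock()
 *      ... exclusive access ...
 *      write_unlock(&my_rwlock);
 */
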
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_spin_lock_flags(&rw->lock, flags);

        rw->counter++;

        __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_spin_lock_flags(&rw->lock, flags);

        rw->counter--;

        __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        if (__raw_spin_trylock(&rw->lock)) {
                rw->counter++;
                __raw_spin_unlock(&rw->lock);
                local_irq_restore(flags);
                return 1;
        }

        local_irq_restore(flags);
        /* If write-locked, we fail to acquire the lock */
        if (rw->counter < 0)
                return 0;

        /* Wait until we have a realistic chance at the lock */
        while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
                cpu_relax();

        goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        __raw_spin_lock_flags(&rw->lock, flags);

        if (rw->counter != 0) {
                /* Readers still hold the lock.  Drop the spinlock and
                 * re-enable interrupts while we wait, so readers can get
                 * in and out, then try again. */
                __raw_spin_unlock(&rw->lock);
                local_irq_restore(flags);

                while (rw->counter != 0)
                        cpu_relax();

                goto retry;
        }

        rw->counter = -1; /* mark as write-locked */
        mb();
        local_irq_restore(flags);
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
        rw->counter = 0;
        __raw_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
        unsigned long flags;
        int result = 0;

        local_irq_save(flags);
        if (__raw_spin_trylock(&rw->lock)) {
                if (rw->counter == 0) {
                        rw->counter = -1;
                        result = 1;
                } else {
                        /* Read-locked.  Oh well. */
                        __raw_spin_unlock(&rw->lock);
                }
        }
        local_irq_restore(flags);

        return result;
}

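/*
 * Illustrative sketch only, not part of this header: write_trylock() is
 * the non-blocking entry point, useful where spinning for readers to
 * drain is unacceptable ("my_rwlock" is a hypothetical name):
 *
 *      if (write_trylock(&my_rwlock)) {        // -> __raw_write_trylock()
 *              ... exclusive access ...
 *              write_unlock(&my_rwlock);
 *      } else {
 *              ... fall back; lock was read- or write-locked ...
 *      }
 */
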
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
        return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
        return !rw->counter;
}

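/* Both *_can_lock() answers are only snapshots: nothing holds the lock
 * across the return, so the counter may change immediately afterwards and
 * callers can treat the result only as a hint. */
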
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */