#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

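/*
 * PA-RISC's only atomic read-modify-write primitive is ldcw (load word
 * and clear): it returns the old value of a word and writes zero to it
 * in one atomic step.  The word used by ldcw generally must be 16-byte
 * aligned, so __ldcw_align() returns a suitably aligned word inside the
 * lock structure.  Convention: the lock word is 1 when the lock is free
 * and 0 when it is held.
 */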
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)

#define __raw_spin_unlock_wait(x) \
                do { cpu_relax(); } while (__raw_spin_is_locked(x))

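/*
 * Acquire.  The outer loop makes the atomic ldcw attempt; when it fails,
 * the inner loop spins with plain loads until the word looks non-zero
 * again, so the waiting CPU is not constantly dirtying the cache line.
 * If the caller's saved PSW had interrupts enabled (PSW_SM_I), they are
 * re-enabled while we busy-wait.
 */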
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
                                         unsigned long flags)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0)
                        if (flags & PSW_SM_I) {
                                local_irq_enable();
                                cpu_relax();
                                local_irq_disable();
                        } else
                                cpu_relax();
        mb();
}

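/*
 * Release: an ordinary store of 1 to the aligned lock word marks the lock
 * free again; the barriers keep the critical section's accesses from being
 * reordered past the store.
 */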
static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        *a = 1;
        mb();
}

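/*
 * Trylock is a single ldcw attempt: a non-zero old value means we just
 * atomically claimed (and zeroed) the word, a zero means somebody else
 * already holds it.
 */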
static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        mb();
        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;
        mb();

        return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * The spinlock is held by the writer, preventing any readers or other
 * writers from grabbing the rwlock.  Readers use the lock to serialise
 * their access to the counter (which records how many readers currently
 * hold the lock).  Linux rwlocks are unfair to writers; they can be
 * starved for an indefinite time by readers.  They can also be taken in
 * interrupt context, so we have to disable interrupts when acquiring the
 * spin lock to be sure that an interrupting reader doesn't get an
 * inconsistent view of the lock.
 */

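/*
 * Protocol used by the functions below:
 *
 *   rw->counter  > 0   that many readers hold the rwlock
 *   rw->counter == 0   the rwlock is free
 *   rw->counter == -1  a writer holds the rwlock
 *
 * Readers take rw->lock only long enough to update the counter; a writer
 * keeps rw->lock held for as long as it owns the rwlock.
 */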
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_spin_lock(&rw->lock);

        rw->counter++;

        __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_spin_lock(&rw->lock);

        rw->counter--;

        __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

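/*
 * read_trylock only fails when the rwlock is actually write-locked
 * (counter < 0).  If we merely lost the race for the internal spinlock,
 * wait for it to be released and try again rather than report failure.
 */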
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
        unsigned long flags;
 retry:
        local_irq_save(flags);
        if (__raw_spin_trylock(&rw->lock)) {
                rw->counter++;
                __raw_spin_unlock(&rw->lock);
                local_irq_restore(flags);
                return 1;
        }

        local_irq_restore(flags);
        /* If write-locked, we fail to acquire the lock */
        if (rw->counter < 0)
                return 0;

        /* Wait until we have a realistic chance at the lock */
        while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
                cpu_relax();

        goto retry;
}

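/*
 * A writer grabs the internal spinlock and keeps it.  If readers are
 * still present it backs off completely (dropping the spinlock and
 * re-enabling interrupts), waits for the counter to drain outside the
 * lock, and then retries from scratch.
 */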
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        __raw_spin_lock(&rw->lock);

        if (rw->counter != 0) {
                __raw_spin_unlock(&rw->lock);
                local_irq_restore(flags);

                while (rw->counter != 0)
                        cpu_relax();

                goto retry;
        }

        rw->counter = -1; /* mark as write-locked */
        mb();
        local_irq_restore(flags);
}

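/*
 * The writer has been holding rw->lock since __raw_write_lock(); mark the
 * rwlock free again and only then release the spinlock.
 */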
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
        rw->counter = 0;
        __raw_spin_unlock(&rw->lock);
}

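/*
 * write_trylock succeeds only if it can take the internal spinlock and
 * finds no readers; on success it keeps rw->lock held, just like
 * __raw_write_lock().
 */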
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
        unsigned long flags;
        int result = 0;

        local_irq_save(flags);
        if (__raw_spin_trylock(&rw->lock)) {
                if (rw->counter == 0) {
                        rw->counter = -1;
                        result = 1;
                } else {
                        /* Read-locked.  Oh well. */
                        __raw_spin_unlock(&rw->lock);
                }
        }
        local_irq_restore(flags);

        return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
        return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
        return !rw->counter;
}

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */