spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
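
/*
 * Note on the locking primitive: the only atomic read-write operation
 * PA-RISC offers is "ldcw" (load and clear word), which returns the old
 * value of a word and atomically zeroes it.  The lock sense is therefore
 * inverted from the usual convention: 1 means free, 0 means held, which
 * is why __raw_spin_is_locked() below tests for 0.  ldcw also requires a
 * 16-byte aligned operand on older CPUs, so the lock word lives inside a
 * small array and __ldcw_align() returns the properly aligned element.
 */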
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)

#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))
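
/*
 * Acquire path: the outer loop makes ldcw attempts (each attempt writes
 * the lock word and so bounces the cache line), while the inner loop
 * spins read-only on *a until the lock looks free again.  If the
 * caller's saved PSW ("flags") had the interrupt-enable bit set
 * (PSW_SM_I), interrupts are re-enabled while busy-waiting so interrupt
 * latency doesn't suffer, and disabled again before retrying the ldcw.
 */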
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}
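
/*
 * Release: store the "free" value (1) back into the aligned lock word.
 * The mb() before the store orders the critical section's memory
 * accesses before the release; a plain store suffices because only the
 * lock holder ever writes 1.
 */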
static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}
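
/*
 * Trylock: a single ldcw attempt.  A non-zero return from __ldcw()
 * means we read 1 (free) and atomically cleared it, i.e. we now own
 * the lock; zero means somebody else holds it.
 */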
static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}
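
/*
 * Typical use is via the generic wrappers from <linux/spinlock.h>
 * (sketch only; these names come from the generic layer, not from this
 * file):
 *
 *	spinlock_t lock = SPIN_LOCK_UNLOCKED;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&lock, flags);
 *
 * spin_lock_irqsave() saves the PSW into "flags", which is how
 * __raw_spin_lock_flags() above learns whether it may re-enable
 * interrupts while spinning.
 */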

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
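
/*
 * There is no cheap architecture-specific read_trylock here, so we punt
 * to the generic helper.  In kernels of this vintage that helper (in
 * kernel/spinlock.c) is believed to amount to:
 *
 *	int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
 *	{
 *		__raw_read_lock(lock);
 *		return 1;
 *	}
 *
 * which works for this implementation because __raw_read_lock() only
 * spins briefly on the internal spinlock and always succeeds.
 */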

/* read_lock and read_unlock are fairly straightforward.  It is
 * unfortunate that we end up saving/restoring the flags twice for
 * read_lock_irqsave() and friends. */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	rw->counter++;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	rw->counter--;
	__raw_spin_unlock(&rw->lock);
}
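
/*
 * Convention for ->counter: 0 means unlocked, a positive value is the
 * number of active readers, and -1 means write-locked.
 */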

/* write_lock is less trivial.  We optimistically grab the lock and
 * check whether we surprised any readers.  If so, we release the lock
 * and wait until they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and there are readers (or
 * other writers) taking this lock from interrupt handlers, the caller
 * has made a mistake and we would deadlock sooner or later anyway.
 * prumpf */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);
		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
	return !rw->counter;
}
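
/*
 * Both *_can_lock() helpers read ->counter without taking the internal
 * spinlock, so the answer is only a hint: it can already be stale by
 * the time the caller acts on it.
 */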

#endif /* __ASM_SPINLOCK_H */