spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
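
/*
 * PA-RISC's atomic read-modify-write primitive is ldcw (load and clear
 * word), so the lock word here is 1 when the lock is free and 0 when it
 * is held -- the reverse of most architectures.  ldcw also wants a
 * 16-byte aligned operand, which is what __ldcw_align() digs out of the
 * raw_spinlock_t.
 */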
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))
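
/*
 * Acquire: __ldcw() atomically reads the lock word and clears it.  A
 * non-zero return means we saw 1 (free) and now own the lock; zero means
 * it was already held, so we spin on plain reads until the owner stores
 * 1 back, then retry the atomic op.
 */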
static inline void __raw_spin_lock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0);
	mb();
}
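
/*
 * Release: a plain store of 1 marks the lock free again; the mb()
 * barriers keep critical-section accesses from drifting past the lock
 * and unlock operations.
 */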
static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}
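
/*
 * Trylock is a single ldcw attempt: a non-zero old value means the lock
 * was free and is now ours; zero means someone else holds it.
 */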
static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
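
/*
 * rw->counter carries the state: it counts the active readers, and a
 * writer parks it at -1.  The embedded rw->lock spinlock serializes
 * counter updates; readers drop it right away, while a writer keeps it
 * held until __raw_write_unlock().
 */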

/* read_lock, read_unlock are pretty straightforward.  Of course it somehow
 * sucks that we end up saving/restoring flags twice for read_lock_irqsave
 * and so on. */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so, we release the lock and wait until
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone messed up and we'd deadlock
 * sooner or later anyway. prumpf */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);
		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}
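
/*
 * State queries: a positive counter means readers hold the lock, -1
 * means a writer does.
 */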
static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
{
	return rw->counter < 0;
}

#endif /* __ASM_SPINLOCK_H */