spinlock.h

#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H

#include <asm/system.h>

#define RW_LOCK_BIAS 0x01000000
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

/* The lock word holds 1 when the lock is free and <= 0 when it is held. */
#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

/* Out-of-line helpers that implement the actual atomic lock operations. */
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	/* Releasing the lock is a plain store of 1 into the lock word;
	 * the "memory" clobber keeps the compiler from moving
	 * critical-section accesses past the store. */
	__asm__ volatile ("move.d %1,%0"
			  : "=m" (lock->lock)
			  : "r" (1)
			  : "memory");
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	return cris_spin_trylock((void *)&lock->lock);
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
	cris_spin_lock((void *)&lock->lock);
}

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	/* The saved irq flags are not needed here; just take the lock. */
	_raw_spin_lock(lock);
}

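/*
 * A minimal usage sketch of the raw primitives above (hypothetical, for
 * illustration only; kernel code normally goes through the generic
 * spin_lock()/spin_unlock() wrappers, which layer preemption handling
 * on top of these):
 *
 *	spinlock_t l = SPIN_LOCK_UNLOCKED;
 *
 *	_raw_spin_lock(&l);	(spins until the lock word is ours)
 *	... critical section ...
 *	_raw_spin_unlock(&l);	(stores 1: the lock is free again)
 */
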
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks. (A sketch of that pattern follows the
 * rwlock definitions below.)
 */
typedef struct {
	spinlock_t lock;
	volatile int counter;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

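/*
 * A minimal sketch of the "mixed" pattern described above (hypothetical
 * caller code, not part of this header; callers use the generic
 * read_lock()/write_lock_irqsave() wrappers built on the raw primitives
 * below):
 *
 *	write_lock_irqsave(&my_rwlock, flags);	(writer: always irq-safe)
 *	... modify the shared data ...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	read_lock(&my_rwlock);	(reader: fine even from an irq handler)
 *	... read the shared data ...
 *	read_unlock(&my_rwlock);
 */
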
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define read_can_lock(x)	((int)(x)->counter >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define write_can_lock(x)	((x)->counter == 0)

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

/* read_lock and read_unlock are pretty straightforward. Of course it is a
 * bit unfortunate that we end up saving/restoring the flags twice for
 * read_lock_irqsave and the like. Disabling interrupts while we hold
 * rw->lock is what keeps an interrupt-context reader on this CPU from
 * deadlocking against us. */
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);
	rw->counter++;
	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);
	rw->counter--;
	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* write_lock is less trivial. We optimistically grab the lock and check
 * if we surprised any readers. If so, we release the lock and wait until
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone messed up and we'd deadlock
 * sooner or later anyway. -- prumpf */
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
	_raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);

		/* counter is volatile, so this loop re-reads it */
		while (rw->counter != 0)
			;

		goto retry;
	}

	/* got it. now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}
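
/*
 * For reference, rw->counter encodes the whole rwlock state: counter > 0
 * means that many readers hold the lock, counter == 0 means it is free,
 * and counter == -1 means a writer holds it (see is_read_locked() and
 * is_write_locked() below).
 */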

/* write_unlock is absolutely trivial - we don't have to wait for anything */
static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	rw->counter = 0;
	_raw_spin_unlock(&rw->lock);
}

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	/* Note that this still spins briefly on the inner rw->lock; it only
	 * refuses to wait for readers to drain. */
	_raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);
		return 0;
	}

	/* got it. now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}

static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->counter < 0;
}

#endif /* __ASM_ARCH_SPINLOCK_H */