/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/bitops.h>	/* test_and_set_bit(), used by _raw_spin_trylock() */
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 */
typedef struct {
	volatile unsigned long lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
/*
 * Simple spin lock operations. There are two variants, one that clears IRQs
 * on the local processor and one that does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
static inline void _raw_spin_lock(spinlock_t *lock)
{
	__asm__ __volatile__ (
		"1:\n\t"
		"tas.b @%0\n\t"	/* T = (lock byte was 0), then set its MSB */
		"bf/s 1b\n\t"	/* T clear means the lock was held: spin */
		"nop\n\t"	/* branch delay slot */
		: /* no outputs; the "memory" clobber covers the lock word */
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	assert_spin_locked(lock);
	lock->lock = 0;
}

#define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
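
/*
 * Minimal usage sketch of the primitives above. The demo_* names and
 * helpers are hypothetical, not part of the kernel API; the irq-disabling
 * variants mentioned earlier are layered on top of these raw operations
 * by linux/spinlock.h.
 */
static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static inline void demo_count_inc(void)
{
	_raw_spin_lock(&demo_lock);	/* spin until the lock is ours */
	demo_count++;			/* critical section */
	_raw_spin_unlock(&demo_lock);
}

static inline int demo_count_try_inc(void)
{
	if (!_raw_spin_trylock(&demo_lock))	/* nonzero only if acquired */
		return 0;
	demo_count++;
	_raw_spin_unlock(&demo_lock);
	return 1;
}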
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	spinlock_t lock;
	atomic_t counter;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_BIAS		0x01000000
#define RW_LOCK_UNLOCKED	(rwlock_t) { { 0 }, { RW_LOCK_BIAS } }
#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
static inline void _raw_read_lock(rwlock_t *rw)
{
	_raw_spin_lock(&rw->lock);
	atomic_inc(&rw->counter);	/* one more reader */
	_raw_spin_unlock(&rw->lock);
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
	_raw_spin_lock(&rw->lock);
	atomic_dec(&rw->counter);	/* one reader gone */
	_raw_spin_unlock(&rw->lock);
}

static inline void _raw_write_lock(rwlock_t *rw)
{
	_raw_spin_lock(&rw->lock);	/* held for the whole write section */
	atomic_set(&rw->counter, -1);	/* mark write-locked */
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	atomic_set(&rw->counter, RW_LOCK_BIAS);	/* restore the unlocked bias */
	_raw_spin_unlock(&rw->lock);
}
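
/*
 * Minimal usage sketch for the read-write lock above, following the mixing
 * rule from the NOTE: readers may run in interrupt context, so the writer
 * disables local interrupts around the write-lock. local_irq_save() and
 * local_irq_restore() are assumed to come from the usual kernel headers;
 * all demo_* names are hypothetical.
 */
static rwlock_t demo_rwlock = RW_LOCK_UNLOCKED;
static unsigned long demo_table[4];

static inline unsigned long demo_read_entry(int i)
{
	unsigned long v;

	_raw_read_lock(&demo_rwlock);	/* other readers may hold this too */
	v = demo_table[i];
	_raw_read_unlock(&demo_rwlock);

	return v;
}

static inline void demo_write_entry(int i, unsigned long v)
{
	unsigned long flags;

	local_irq_save(flags);		/* keep interrupt-time readers out */
	_raw_write_lock(&demo_rwlock);
	demo_table[i] = v;
	_raw_write_unlock(&demo_rwlock);
	local_irq_restore(flags);
}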
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

static inline int _raw_write_trylock(rwlock_t *rw)
{
	/* Succeeds only when the counter is exactly RW_LOCK_BIAS,
	 * i.e. no readers are inside their critical sections. */
	if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
		return 1;

	/* Lock is busy: undo the subtraction and report failure. */
	atomic_add(RW_LOCK_BIAS, &rw->counter);
	return 0;
}
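
/*
 * Worked example of the bias arithmetic above, for illustration: a free
 * lock holds RW_LOCK_BIAS (0x01000000) in the counter and each reader adds
 * 1. With no readers, RW_LOCK_BIAS - RW_LOCK_BIAS == 0 and the sub-and-test
 * succeeds; with two readers, (RW_LOCK_BIAS + 2) - RW_LOCK_BIAS == 2, so
 * the test fails and the bias is added straight back. While a writer holds
 * the lock the counter is -1, which likewise fails the test.
 */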
#endif /* __ASM_SH_SPINLOCK_H */