/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/spinlock_types.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define __raw_spin_is_locked(x)         ((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

/* Spin (without acquiring the lock) until the current holder releases it. */
#define __raw_spin_unlock_wait(x) \
        do { cpu_relax(); } while (__raw_spin_is_locked(x))

/*
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
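/*
 * Illustrative only (a minimal sketch, not part of the original header):
 * these __raw_* hooks sit underneath the generic spin_lock()/spin_unlock()
 * API, and the IRQ-clearing variant is reached through spin_lock_irqsave().
 * 'my_lock' below is a hypothetical caller-side lock:
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *      unsigned long flags;
 *
 *      spin_lock(&my_lock);                    plain variant
 *      spin_unlock(&my_lock);
 *
 *      spin_lock_irqsave(&my_lock, flags);     IRQ-clearing variant
 *      spin_unlock_irqrestore(&my_lock, flags);
 */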

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        /*
         * tas.b sets the T bit if the addressed byte was zero and
         * unconditionally writes 0x80 back, so T=1 means the lock was
         * free and is now ours; on T=0 (already held) bf/s branches
         * back to retry, with a nop in the delay slot.
         */
        __asm__ __volatile__ (
                "1:\n\t"
                "tas.b @%0\n\t"
                "bf/s 1b\n\t"
                "nop\n\t"
                : "=r" (lock->lock)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        //assert_spin_locked(lock);

        lock->lock = 0;
}

#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to take an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
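/*
 * Illustrative only (a minimal sketch; 'my_rwlock' is hypothetical): a
 * writer in process context takes the irq-safe form so that a reader
 * running from an interrupt on the same CPU cannot deadlock against it,
 * while readers may use the plain form:
 *
 *      static DEFINE_RWLOCK(my_rwlock);
 *      unsigned long flags;
 *
 *      write_lock_irqsave(&my_rwlock, flags);          writer, irq-safe
 *      write_unlock_irqrestore(&my_rwlock, flags);
 *
 *      read_lock(&my_rwlock);                          reader, may run in IRQ context
 *      read_unlock(&my_rwlock);
 */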

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        __raw_spin_lock(&rw->lock);
        atomic_inc(&rw->counter);
        __raw_spin_unlock(&rw->lock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        __raw_spin_lock(&rw->lock);
        atomic_dec(&rw->counter);
        __raw_spin_unlock(&rw->lock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        /*
         * The writer takes rw->lock and holds it across its whole
         * critical section; __raw_write_unlock() releases it again.
         */
        __raw_spin_lock(&rw->lock);
        atomic_set(&rw->counter, -1);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        atomic_set(&rw->counter, 0);
        __raw_spin_unlock(&rw->lock);
}
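
/*
 * The helpers below assume the classic biased-counter rwlock convention:
 * rw->counter starts at RW_LOCK_BIAS, each reader subtracts one, and a
 * writer subtracts the whole bias, succeeding only when the result is
 * zero (i.e. no readers).  Note that the lock/unlock paths above instead
 * drive the counter around zero, so the two schemes assume different
 * initial counter values.
 */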

static inline int __raw_write_can_lock(raw_rwlock_t *rw)
{
        return (atomic_read(&rw->counter) == RW_LOCK_BIAS);
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_dec_return(count) >= 0)
                return 1;

        atomic_inc(count);
        return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
        if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
                return 1;

        atomic_add(RW_LOCK_BIAS, &rw->counter);
        return 0;
}

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */