/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

#include <asm/atomic.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while (__raw_spin_is_locked(x))

/*
 * Simple spin lock operations.  There are two variants: one that clears
 * IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
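
/*
 * Illustrative usage at the generic spinlock layer that wraps these
 * primitives (a minimal sketch; `my_lock`, `flags`, and `shared_count`
 * are hypothetical names, not part of this file):
 *
 *	spin_lock(&my_lock);			(non-IRQ variant)
 *	shared_count++;
 *	spin_unlock(&my_lock);
 *
 *	spin_lock_irqsave(&my_lock, flags);	(IRQ-disabling variant)
 *	shared_count++;
 *	spin_unlock_irqrestore(&my_lock, flags);
 */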
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__ (
		"1:\n\t"
		"tas.b @%0\n\t"		/* T = (byte was 0); byte |= 0x80 */
		"bf/s 1b\n\t"		/* retry while T is clear (lock held) */
		"nop\n\t"		/* branch delay slot */
		: /* no outputs */
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
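
/*
 * tas.b only ever sets the lock byte, so releasing the lock is a plain
 * store of zero; any non-zero value means "locked".
 */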
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	assert_spin_locked(lock);

	lock->lock = 0;
}
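
/*
 * Trylock is built on the generic test_and_set_bit(): it atomically sets
 * bit 0 of the lock word and succeeds iff that bit was previously clear.
 */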
#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
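
/*
 * Illustrative mixed-irq usage at the generic rwlock layer (a minimal
 * sketch; `my_rwlock`, `flags`, `table`, `lookup()`, and `update()` are
 * hypothetical names, not part of this file):
 *
 *	Reader, possibly running in interrupt context:
 *		read_lock(&my_rwlock);
 *		lookup(table);
 *		read_unlock(&my_rwlock);
 *
 *	Writer, which must also lock out readers in interrupts:
 *		write_lock_irqsave(&my_rwlock, flags);
 *		update(table);
 *		write_unlock_irqrestore(&my_rwlock, flags);
 */
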
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	atomic_inc(&rw->counter);

	__raw_spin_unlock(&rw->lock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	atomic_dec(&rw->counter);

	__raw_spin_unlock(&rw->lock);
}
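
/*
 * A writer takes the underlying spinlock and holds it until write_unlock.
 * Since readers must also take that spinlock to adjust the counter, no
 * new readers can enter while the write lock is held.
 */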
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	atomic_set(&rw->counter, -1);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	atomic_set(&rw->counter, 0);
	__raw_spin_unlock(&rw->lock);
}
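
/*
 * Read trylock falls back to the generic helper.  Write trylock subtracts
 * RW_LOCK_BIAS and succeeds only if the counter drops to zero; otherwise
 * it adds the bias back and reports failure.
 */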
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
		return 1;

	atomic_add(RW_LOCK_BIAS, &rw->counter);

	return 0;
}

#endif /* __ASM_SH_SPINLOCK_H */