/*
 * CRIS architecture spinlock and read-write lock primitives
 * (arch-specific backing for <linux/spinlock.h>).
 */
  1. #ifndef __ASM_ARCH_SPINLOCK_H
  2. #define __ASM_ARCH_SPINLOCK_H
  3. #include <linux/spinlock_types.h>
  4. #define RW_LOCK_BIAS 0x01000000
  5. extern void cris_spin_unlock(void *l, int val);
  6. extern void cris_spin_lock(void *l);
  7. extern int cris_spin_trylock(void *l);
  8. static inline int __raw_spin_is_locked(raw_spinlock_t *x)
  9. {
  10. return *(volatile signed char *)(&(x)->slock) <= 0;
  11. }
/*
 * Release the spinlock by storing 1 into ->slock with a single
 * CRIS "move.d" store; the "memory" clobber keeps the compiler
 * from reordering memory accesses out of the critical section.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ volatile ("move.d %1,%0" \
			  : "=m" (lock->slock) \
			  : "r" (1) \
			  : "memory");
}
  19. static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  20. {
  21. while (__raw_spin_is_locked(lock))
  22. cpu_relax();
  23. }
  24. static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  25. {
  26. return cris_spin_trylock((void *)&lock->slock);
  27. }
  28. static inline void __raw_spin_lock(raw_spinlock_t *lock)
  29. {
  30. cris_spin_lock((void *)&lock->slock);
  31. }
  32. static inline void
  33. __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  34. {
  35. __raw_spin_lock(lock);
  36. }
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
  48. static inline int __raw_read_can_lock(raw_rwlock_t *x)
  49. {
  50. return (int)(x)->lock > 0;
  51. }
  52. static inline int __raw_write_can_lock(raw_rwlock_t *x)
  53. {
  54. return (x)->lock == RW_LOCK_BIAS;
  55. }
  56. static inline void __raw_read_lock(raw_rwlock_t *rw)
  57. {
  58. __raw_spin_lock(&rw->slock);
  59. while (rw->lock == 0);
  60. rw->lock--;
  61. __raw_spin_unlock(&rw->slock);
  62. }
  63. static inline void __raw_write_lock(raw_rwlock_t *rw)
  64. {
  65. __raw_spin_lock(&rw->slock);
  66. while (rw->lock != RW_LOCK_BIAS);
  67. rw->lock == 0;
  68. __raw_spin_unlock(&rw->slock);
  69. }
  70. static inline void __raw_read_unlock(raw_rwlock_t *rw)
  71. {
  72. __raw_spin_lock(&rw->slock);
  73. rw->lock++;
  74. __raw_spin_unlock(&rw->slock);
  75. }
  76. static inline void __raw_write_unlock(raw_rwlock_t *rw)
  77. {
  78. __raw_spin_lock(&rw->slock);
  79. while (rw->lock != RW_LOCK_BIAS);
  80. rw->lock == RW_LOCK_BIAS;
  81. __raw_spin_unlock(&rw->slock);
  82. }
  83. static inline int __raw_read_trylock(raw_rwlock_t *rw)
  84. {
  85. int ret = 0;
  86. __raw_spin_lock(&rw->slock);
  87. if (rw->lock != 0) {
  88. rw->lock--;
  89. ret = 1;
  90. }
  91. __raw_spin_unlock(&rw->slock);
  92. return ret;
  93. }
  94. static inline int __raw_write_trylock(raw_rwlock_t *rw)
  95. {
  96. int ret = 0;
  97. __raw_spin_lock(&rw->slock);
  98. if (rw->lock == RW_LOCK_BIAS) {
  99. rw->lock == 0;
  100. ret = 1;
  101. }
  102. __raw_spin_unlock(&rw->slock);
  103. return 1;
  104. }
  105. #define _raw_spin_relax(lock) cpu_relax()
  106. #define _raw_read_relax(lock) cpu_relax()
  107. #define _raw_write_relax(lock) cpu_relax()
  108. #endif /* __ASM_ARCH_SPINLOCK_H */