spinlock.h

/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
                      unsigned int old, unsigned int new)
{
        asm volatile(
                "       cs      %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory" );
        return old;
}
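
/*
 * For reference: CS (compare and swap) atomically compares the 32-bit
 * word at *lock with 'old' and, only if they match, stores 'new' into
 * *lock.  Either way the value previously found in *lock is returned,
 * so the caller can tell whether the swap took place.  A rough,
 * non-atomic C sketch of the semantics:
 *
 *      prev = *lock;
 *      if (prev == old)
 *              *lock = new;
 *      return prev;
 */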

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
        do { while (arch_spin_is_locked(lock)) \
                 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);
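
/*
 * The slow paths declared above (arch_spin_lock_wait() and friends) are
 * implemented out of line in the architecture code (arch/s390/lib/spinlock.c
 * in trees of this vintage).
 *
 * The lock word owner_cpu is 0 while the lock is free.  A lock holder
 * stores the bitwise complement of its CPU number, ~smp_processor_id(),
 * so that CPU 0 remains distinguishable from the unlocked state.
 */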

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return;
        arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return;
        arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return 1;
        return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
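
/*
 * Callers normally reach these primitives through the generic wrappers
 * in <linux/spinlock.h> rather than using arch_spin_* directly.  A
 * minimal sketch (my_lock and example() are hypothetical, shown for
 * illustration only):
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      static void example(void)
 *      {
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&my_lock, flags);
 *              ... critical section ...
 *              spin_unlock_irqrestore(&my_lock, flags);
 *      }
 */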

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
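
/*
 * The rwlock word encodes both states: bit 31 (0x80000000) is set by a
 * writer, and the lower 31 bits count the active readers.  Readers mask
 * off the writer bit before the compare-and-swap below, so the swap
 * fails and the slow path is taken whenever a writer holds the lock.
 */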

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
                _raw_read_lock_wait_flags(rw, flags);
}
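
/*
 * Releasing a read lock has no separate slow path: the compare-and-swap
 * is simply retried until the reader count is decremented without
 * another CPU having changed the lock word in between.
 */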

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old, cmp;

        old = rw->lock;
        do {
                cmp = old;
                old = _raw_compare_and_swap(&rw->lock, old, old - 1);
        } while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
                return 1;
        return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
        return _raw_write_trylock_retry(rw);
}

#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
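
/*
 * As with the spinlocks, callers normally use the generic rwlock API
 * from <linux/spinlock.h>.  A minimal sketch that matches the note
 * above about interrupt readers (my_rwlock, reader() and writer() are
 * hypothetical, shown for illustration only):
 *
 *      static DEFINE_RWLOCK(my_rwlock);
 *
 *      static void reader(void)
 *      {
 *              read_lock(&my_rwlock);
 *              ... read shared data ...
 *              read_unlock(&my_rwlock);
 *      }
 *
 *      static void writer(void)
 *      {
 *              unsigned long flags;
 *
 *              write_lock_irqsave(&my_rwlock, flags);
 *              ... modify shared data ...
 *              write_unlock_irqrestore(&my_rwlock, flags);
 *      }
 */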

#endif /* __ASM_SPINLOCK_H */