/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
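
/*
 * _raw_compare_and_swap() wraps the s390 COMPARE AND SWAP (CS) instruction:
 * it atomically compares *lock with 'old' and, if they are equal, stores
 * 'new' into *lock.  In either case it returns the value *lock contained
 * before the operation, so callers test for success by comparing the
 * result against 'old'.  CS sets the condition code and acts as a
 * serialization point, hence the "cc" and "memory" clobbers below.
 */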
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile ("cs %0,%3,0(%4)"
		      : "=d" (old), "=m" (*lock)
		      : "0" (old), "d" (new), "a" (lock), "m" (*lock)
		      : "cc", "memory" );
	return old;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} __attribute__ ((aligned (4))) spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_lock_init(lp)	do { (lp)->lock = 0; } while(0)
#define spin_unlock_wait(lp)	do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc);
extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc);
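
/*
 * The lock word is 0 when the lock is free.  _raw_spin_lock() swaps in the
 * caller's return address, so a held lock records which code path took it.
 * The out-of-line helpers _raw_spin_lock_wait() and _raw_spin_trylock_retry()
 * handle the contended case, when the initial compare-and-swap from 0 fails.
 */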
static inline void _raw_spin_lock(spinlock_t *lp)
{
	unsigned long pc = (unsigned long) __builtin_return_address(0);

	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
		_raw_spin_lock_wait(lp, pc);
}

static inline int _raw_spin_trylock(spinlock_t *lp)
{
	unsigned long pc = (unsigned long) __builtin_return_address(0);

	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
		return 1;
	return _raw_spin_trylock_retry(lp, pc);
}

static inline void _raw_spin_unlock(spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
}
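
/*
 * Illustrative usage sketch, not part of this header: kernel code normally
 * takes these locks through the generic spin_lock()/spin_unlock() wrappers,
 * which resolve to the _raw_* primitives above, roughly as follows:
 *
 *	spinlock_t lock = SPIN_LOCK_UNLOCKED;
 *
 *	_raw_spin_lock(&lock);		lock word now holds the caller's PC
 *	... critical section ...
 *	_raw_spin_unlock(&lock);	lock word is 0 again
 */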

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
	volatile unsigned long owner_pc;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0 }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
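
/*
 * Lock word encoding: bit 0x80000000 is set while a writer holds the lock,
 * and the low 31 bits count the readers currently holding it.  Hence
 * read_can_lock() only requires the sign bit to be clear, while
 * write_can_lock() requires the whole word to be zero.
 */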

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(rwlock_t *lp);
extern int _raw_read_trylock_retry(rwlock_t *lp);
extern void _raw_write_lock_wait(rwlock_t *lp);
extern int _raw_write_trylock_retry(rwlock_t *lp);

static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}
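
/*
 * Releasing a read lock must also go through compare-and-swap: other CPUs
 * may be taking or dropping the lock concurrently, so the decrement is
 * retried until CS reports that the lock word still held the expected
 * value when the swap was attempted.
 */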
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void _raw_write_lock(rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int _raw_read_trylock(rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int _raw_write_trylock(rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
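
/*
 * Illustrative usage sketch, again assuming callers go through the generic
 * read_lock()/write_lock() wrappers rather than these primitives directly:
 *
 *	rwlock_t rw = RW_LOCK_UNLOCKED;
 *
 *	_raw_read_lock(&rw);		reader count incremented
 *	... read-side critical section ...
 *	_raw_read_unlock(&rw);
 *
 *	_raw_write_lock(&rw);		lock word set to 0x80000000
 *	... write-side critical section ...
 *	_raw_write_unlock(&rw);
 */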

#endif /* __ASM_SPINLOCK_H */