/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

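/*
 * Atomic compare-and-swap on the lock word, built on the s390
 * COMPARE AND SWAP (cs) instruction: if *lock equals "old" it is
 * replaced by "new".  The previous value of *lock is returned, so
 * the caller tests for success by comparing the result with "old".
 */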
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
                      unsigned int old, unsigned int new)
{
        asm volatile ("cs %0,%3,0(%4)"
                      : "=d" (old), "=m" (*lock)
                      : "0" (old), "d" (new), "a" (lock), "m" (*lock)
                      : "cc", "memory" );
        return old;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
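/*
 * Usage sketch (illustrative only, not part of this header): callers
 * normally go through the generic spinlock API, which bottoms out in
 * the __raw_* primitives below; "my_lock" and "flags" are placeholder
 * names.
 *
 *      spin_lock(&my_lock);                    // leaves IRQs untouched
 *      ...critical section...
 *      spin_unlock(&my_lock);
 *
 *      spin_lock_irqsave(&my_lock, flags);     // IRQ-clearing variant
 *      ...critical section...
 *      spin_unlock_irqrestore(&my_lock, flags);
 */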
#define __raw_spin_is_locked(x) ((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
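/*
 * The lock word is set to the caller's return address with the low
 * bit forced to 1, so a held lock is always non-zero; as a side
 * effect the owner of a contended lock can be read straight out of
 * the lock word when debugging.
 */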
static inline void __raw_spin_lock(raw_spinlock_t *lp)
{
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

        if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
                _raw_spin_lock_wait(lp, pc);
}

static inline int __raw_spin_trylock(raw_spinlock_t *lp)
{
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

        if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
                return 1;
        return _raw_spin_trylock_retry(lp, pc);
}
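/*
 * The lock is released with a compare-and-swap rather than a plain
 * store: on s390 the cs instruction serializes the CPU, so all
 * accesses made inside the critical section complete before the
 * lock word goes back to zero.
 */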
static inline void __raw_spin_unlock(raw_spinlock_t *lp)
{
        _raw_compare_and_swap(&lp->lock, lp->lock, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
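/*
 * Usage sketch of the irq mixing described above (illustrative only,
 * not part of this header; "my_rwlock" and "flags" are placeholder
 * names).  Readers, even ones running in interrupt context, may take
 * the plain read lock, while every writer must use the irq-safe form:
 *
 *      read_lock(&my_rwlock);
 *      ...read-side section...
 *      read_unlock(&my_rwlock);
 *
 *      write_lock_irqsave(&my_rwlock, flags);
 *      ...write-side section...
 *      write_unlock_irqrestore(&my_rwlock, flags);
 */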
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(raw_rwlock_t *lp);
extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
extern void _raw_write_lock_wait(raw_rwlock_t *lp);
extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
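/*
 * The rwlock word encodes the whole state in one unsigned int: the
 * most-significant bit (0x80000000) is set while a writer holds the
 * lock, and the lower 31 bits count the active readers.  Readers mask
 * off the writer bit before the compare-and-swap, so the swap fails
 * (and the slow path is taken) whenever a writer is in.
 */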
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
                _raw_read_lock_wait(rw);
}
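/*
 * Dropping a read lock must retry the compare-and-swap: another
 * reader may change the count between the load and the cs, in which
 * case cs returns the fresh value and the loop tries again with it.
 */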
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        unsigned int old, cmp;

        old = rw->lock;
        do {
                cmp = old;
                old = _raw_compare_and_swap(&rw->lock, old, old - 1);
        } while (cmp != old);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
                return 1;
        return _raw_read_trylock_retry(rw);
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
        return _raw_write_trylock_retry(rw);
}

#endif /* __ASM_SPINLOCK_H */