/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

struct rw_semaphore {
        signed long count;
#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_ACTIVE_MASK               0xffffffffL
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
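/*
 * The count encoding follows from the bias values above: the low 32
 * bits hold the number of active lockers, and the upper 32 bits go
 * negative once a writer is active or queued.  Representative states
 * of the 64-bit count:
 *
 *      0x0000000000000000      unlocked
 *      0x000000000000000N      N readers active, no waiters
 *      0xffffffff00000001      one writer active (WAITING_BIAS + ACTIVE_BIAS)
 *      0xffffffff00000000      no active lockers, threads queued on wait_list
 */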
        spinlock_t wait_lock;
        struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                 \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __init_rwsem((sem), #sem, &__key);              \
} while (0)
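
/*
 * Usage sketch (example_sem and struct example are hypothetical
 * callers, not part of this header):
 *
 *      static DECLARE_RWSEM(example_sem);      - static initialization
 *
 *      struct example {
 *              struct rw_semaphore sem;
 *      };
 *      struct example *ex = kzalloc(sizeof(*ex), GFP_KERNEL);
 *      init_rwsem(&ex->sem);                   - runtime initialization
 *
 * init_rwsem() is a macro so that every call site gets its own static
 * lock_class_key, which lockdep uses to classify each semaphore
 * separately.
 */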

/*
 * lock for reading
 */
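/*
 * The fast path speculatively adds RWSEM_ACTIVE_READ_BIAS (+1) through
 * an atomic64 operation (the cast relies on atomic64_t wrapping a
 * single long, which holds on sparc64).  A non-positive result means
 * the sign bits are set, i.e. a writer is active or queued, and the
 * acquirer must block in rwsem_down_read_failed().
 */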
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
                rwsem_down_read_failed(sem);
}
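
/*
 * The trylock variant loops on cmpxchg() instead: it installs
 * count + RWSEM_ACTIVE_READ_BIAS only while the observed count is
 * non-negative (no writer), and returns 0 rather than sleeping once a
 * writer appears.
 */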
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        while ((tmp = sem->count) >= 0L) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
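/*
 * A writer adds RWSEM_ACTIVE_WRITE_BIAS (WAITING_BIAS + ACTIVE_BIAS)
 * in one step.  Seeing exactly that value afterwards proves the
 * semaphore was previously unlocked; any other value means readers or
 * another writer got there first and the thread must queue in
 * rwsem_down_write_failed().
 */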
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        long tmp;

        tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                  (atomic64_t *)(&sem->count));
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}
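
/*
 * Write trylock is a single cmpxchg() from the unlocked state; unlike
 * the read trylock there is nothing to loop over, because any non-zero
 * count already rules out taking the lock for writing.
 */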
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
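/*
 * A result with the waiting bias set (tmp < -1) and a zero active part
 * means this was the last active locker while the wait list is
 * non-empty, so rwsem_wake() must pass the lock on to a queued thread.
 */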
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
        if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
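/*
 * The whole write bias is removed in one step; a negative result means
 * waiting bias is still present, i.e. the wait list is non-empty and
 * rwsem_wake() must hand the lock over.
 */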
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                                         (atomic64_t *)(&sem->count)) < 0L))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
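/*
 * rwsem_atomic_add() here and rwsem_atomic_update() further down are
 * the hooks the generic slow-path code in lib/rwsem.c uses to adjust
 * the count while it manipulates the wait list.
 */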
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
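/*
 * Adding -RWSEM_WAITING_BIAS converts an active write bias
 * (WAITING_BIAS + ACTIVE_BIAS) into a single read bias (ACTIVE_BIAS),
 * turning the caller into a reader without ever dropping the lock.  A
 * negative result indicates threads are still queued, so waiting
 * readers can now be woken via rwsem_downgrade_wake().
 */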
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
        if (tmp < 0L)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}
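
/*
 * Any non-zero count means the semaphore is held or contended: active
 * readers and writers contribute the low active bits, and queued
 * waiters keep the waiting bias set.
 */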
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */