#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
#ifdef CONFIG_PPC64
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
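
/*
 * How the count encodes the lock state: zero means unlocked.  Each
 * active reader adds RWSEM_ACTIVE_READ_BIAS, so a positive count is
 * simply the number of readers.  A writer adds RWSEM_ACTIVE_WRITE_BIAS
 * (one active unit plus RWSEM_WAITING_BIAS), which drives the count
 * negative.  RWSEM_ACTIVE_MASK extracts the active-locker part of the
 * word; the part above it goes negative whenever a writer holds the
 * semaphore or tasks are queued on wait_list.
 */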
struct rw_semaphore {
	long			count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ \
	RWSEM_UNLOCKED_VALUE, \
	__SPIN_LOCK_UNLOCKED((name).wait_lock), \
	LIST_HEAD_INIT((name).wait_list) \
	__RWSEM_DEP_MAP_INIT(name) \
}

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
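
/*
 * A hypothetical user (the name "foo_sem" is illustrative only):
 *
 *	static DECLARE_RWSEM(foo_sem);
 *
 * The generic wrappers in <linux/rwsem.h> then provide the usual
 * entry points, e.g. down_read(&foo_sem)/up_read(&foo_sem) and
 * down_write(&foo_sem)/up_write(&foo_sem), which call into the
 * fast paths below.
 */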

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_rwsem((sem), #sem, &__key);	\
	} while (0)
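
/*
 * Note that the static __key above gives each init_rwsem() call site
 * its own lock_class_key, so lockdep classifies semaphores per
 * initialization site rather than lumping them all together.
 */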

/*
 * lock for reading
 */
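/*
 * Fast path: atomically bump the active count.  A positive result
 * means we now hold a read lock; a result <= 0 means a writer is
 * active or tasks are queued, so fall back to the slow path in
 * lib/rwsem.c.
 */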
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}
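
/*
 * Only attempt the cmpxchg while the count is non-negative, i.e.
 * while no writer is active or queued; retry if we merely raced
 * with another reader updating the count.
 */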
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
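/*
 * Fast path: add the write bias in one atomic step.  The result
 * equals RWSEM_ACTIVE_WRITE_BIAS only if the semaphore was
 * previously unlocked; anything else means contention, so take
 * the slow path.
 */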
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
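/*
 * Drop one active reader.  A negative result with the active part
 * clear means we were the last active locker out while tasks are
 * still queued, so wake the front of the queue.
 */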
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
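/*
 * Remove the write bias we added in __down_write().  A negative
 * result means waiters queued up behind us, so hand the lock on
 * via rwsem_wake().
 */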
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					    (atomic_long_t *)&sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
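/*
 * Helper used by the generic slow-path code in lib/rwsem.c to adjust
 * the count, e.g. when queueing or dequeueing waiters.
 */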
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 */
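/*
 * Turn a write hold into a read hold by cancelling the waiting bias:
 * the count goes from RWSEM_ACTIVE_WRITE_BIAS (plus any waiter
 * adjustments) to RWSEM_ACTIVE_BIAS plus those adjustments.  If the
 * result is still negative, queued readers can now run, so wake them.
 */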
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
				     (atomic_long_t *)&sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return sem->count != 0;
}

#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_RWSEM_H */