/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */
#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
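/*
 * Rough sketch of the count encoding (the exact slow-path adjustments
 * live in lib/rwsem.c): the low 16 bits (RWSEM_ACTIVE_MASK) count
 * active lockers, and waiting tasks bias the word negative, e.g.:
 *
 *	0x00000000	unlocked, nothing queued
 *	0x00000003	three active readers
 *	0xffff0001	one active writer (RWSEM_ACTIVE_WRITE_BIAS)
 *	count < 0	a writer is active or tasks are queued
 */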
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)	\
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

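/*
 * Illustrative use (hypothetical names): callers go through the
 * generic wrappers in linux/rwsem.h, never through the __-prefixed
 * helpers below.
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	// shared: many readers may hold it
 *	...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);	// exclusive: one writer, no readers
 *	...
 *	up_write(&my_sem);
 */
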
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

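/*
 * init_rwsem() is a macro rather than a function so that every call
 * site gets its own static lock_class_key, which is what lets lockdep
 * tell rwsems initialized in different places apart.
 */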
#define init_rwsem(sem)				\
do {						\
	static struct lock_class_key __key;	\
						\
	__init_rwsem((sem), #sem, &__key);	\
} while (0)

/*
 * lock for reading
 */
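/*
 * Fast path sketch: atomically bump count.  A positive result (e.g.
 * 0 -> 1 on an unlocked rwsem) means only readers are present and we
 * are done; zero or negative means a writer is active or queued, so
 * punt to rwsem_down_read_failed() in lib/rwsem.c.
 */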
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

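/*
 * Trylock variant: retry a cmpxchg() for as long as the lock looks
 * reader-held or free (count >= 0); e.g. swapping 2 -> 3 takes a
 * third read lock.  Returns 1 on success, 0 if a writer owns it.
 */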
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
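/*
 * A writer adds RWSEM_ACTIVE_WRITE_BIAS (-0x10000 + 1), so on an
 * unlocked rwsem the sum is exactly RWSEM_ACTIVE_WRITE_BIAS; any
 * other result means readers or waiters were already there and the
 * slow path in lib/rwsem.c has to sort out the queueing.
 */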
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

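/*
 * Write trylock is a single cmpxchg(): it can only succeed by
 * swapping RWSEM_UNLOCKED_VALUE for RWSEM_ACTIVE_WRITE_BIAS, i.e.
 * when no reader, writer or waiter is present at all.
 */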
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
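/*
 * Drop our RWSEM_ACTIVE_BIAS.  If the result is negative with the
 * active mask clear, we were the last active locker and tasks are
 * queued, so rwsem_wake() hands the lock on.
 */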
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
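/*
 * Subtracting RWSEM_ACTIVE_WRITE_BIAS undoes __down_write(): an
 * uncontended release goes 0xffff0001 -> 0; a result that is still
 * negative means waiters queued up behind us, so wake them.
 */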
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
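/*
 * Adding -RWSEM_WAITING_BIAS (i.e. +0x10000) turns a writer's bias
 * into a reader's: 0xffff0001 (RWSEM_ACTIVE_WRITE_BIAS) becomes
 * 0x00000001 (RWSEM_ACTIVE_READ_BIAS).  A still-negative result
 * means waiters remain; rwsem_downgrade_wake() lets queued readers
 * in alongside us.
 */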
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

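/*
 * The subclass argument only matters to lockdep; this fast path has
 * nothing to annotate, so the nested variant is plain __down_write().
 */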
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	__down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */