/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */
#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        long                    count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};
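
/*
 * Illustrative note (added commentary, not part of the original header):
 * the low 16 bits of ->count hold the number of active lockers and the
 * count goes negative while tasks are queued on ->wait_list.  For
 * example:
 *
 *      0x00000000      unlocked, no waiters (RWSEM_UNLOCKED_VALUE)
 *      0x00000003      three active readers, no waiters
 *      0xffff0001      one active writer, no waiters
 *                      (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *      0xffff0000      no active lockers, at least one waiter queued
 */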

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)                       \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED,     \
          LIST_HEAD_INIT((name).wait_list)              \
          __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
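
/*
 * A minimal usage sketch (added commentary; "my_sem" is a hypothetical
 * name, not part of this header).  Statically allocated semaphores are
 * declared and initialized in one step:
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);
 *      ... read-side critical section ...
 *      up_read(&my_sem);
 */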

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                         \
do {                                            \
        static struct lock_class_key __key;     \
                                                \
        __init_rwsem((sem), #sem, &__key);      \
} while (0)
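
/*
 * Runtime initialization sketch (added commentary; "foo" is a
 * hypothetical structure).  Each init_rwsem() call site gets its own
 * static lock_class_key so that lockdep can classify the semaphore:
 *
 *      struct foo {
 *              struct rw_semaphore sem;
 *      };
 *
 *      static void foo_setup(struct foo *f)
 *      {
 *              init_rwsem(&f->sem);
 *      }
 */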

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
                smp_wmb();
        else
                rwsem_down_read_failed(sem);
}
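
/*
 * Worked example (added commentary): on an uncontended semaphore the
 * increment takes ->count from 0x00000000 to 0x00000001, the result is
 * positive, and the reader proceeds past the barrier.  If a writer
 * holds the lock (0xffff0001), the increment yields 0xffff0002, which
 * is negative as a signed value, so the task enters the slow path in
 * lib/rwsem.c via rwsem_down_read_failed().
 */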

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        smp_wmb();
                        return 1;
                }
        }
        return 0;
}
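
/*
 * Usage sketch (added commentary; "my_sem" is hypothetical): the loop
 * retries only while ->count stays non-negative, i.e. while no writer
 * holds the lock and nobody is queued; otherwise it fails rather than
 * sleep.  Callers normally reach this through the generic wrapper:
 *
 *      if (down_read_trylock(&my_sem)) {
 *              ... read-side critical section ...
 *              up_read(&my_sem);
 *      }
 */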

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
                smp_wmb();
        else
                rwsem_down_write_failed(sem);
}
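
/*
 * Worked example (added commentary): adding RWSEM_ACTIVE_WRITE_BIAS to
 * an unlocked count (0x00000000) yields exactly RWSEM_ACTIVE_WRITE_BIAS
 * (0xffff0001), so the writer owns the lock.  Any other result means
 * readers, another writer, or waiters were already present, and the
 * task falls back to rwsem_down_write_failed().
 */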

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
}
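
/*
 * Usage sketch (added commentary; "my_sem" is hypothetical): the single
 * cmpxchg() succeeds only against a completely idle semaphore:
 *
 *      if (down_write_trylock(&my_sem)) {
 *              ... exclusive critical section ...
 *              up_write(&my_sem);
 *      }
 */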

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}
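
/*
 * Worked example (added commentary): with one active reader and a
 * writer queued, ->count is RWSEM_ACTIVE_READ_BIAS + RWSEM_WAITING_BIAS
 * = 0xffff0001.  The decrement leaves 0xffff0000: negative, with the
 * active mask clear, so the last reader out calls rwsem_wake() to hand
 * the lock to the queued writer.
 */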

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        smp_wmb();
        if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0)
                rwsem_wake(sem);
}
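
/*
 * Worked example (added commentary): an uncontended writer releases
 * from 0xffff0001 back to 0x00000000 and nobody needs waking.  If a
 * task queued in the meantime, ->count was 0xffff0001 +
 * RWSEM_WAITING_BIAS = 0xfffe0001, the subtraction leaves 0xffff0000
 * (negative), and rwsem_wake() processes the wait list.
 */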

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}
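
/*
 * Worked example (added commentary): adding -RWSEM_WAITING_BIAS
 * (+0x00010000) converts a writer's bias into a single reader's bias:
 * 0xffff0001 becomes 0x00000001.  If tasks were queued behind the
 * writer the result stays negative, e.g. 0xfffe0001 becomes 0xffff0001,
 * and rwsem_downgrade_wake() lets queued readers in to share the lock.
 */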

static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        /* the subclass only matters to lockdep; this fast path ignores it */
        __down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
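
/*
 * Note (added commentary): rwsem_atomic_add() and rwsem_atomic_update()
 * are the hooks the generic slow path in lib/rwsem.c uses to adjust
 * ->count while it manipulates the wait list under ->wait_lock.
 */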

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */