/*
 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */
#ifndef _PPC_RWSEM_H
#define _PPC_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>
/*
 * the semaphore definition
 */
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t -- paulus */
	signed long count;
/* Bias values packed into 'count': the low 16 bits (RWSEM_ACTIVE_MASK)
 * count active lockers; RWSEM_WAITING_BIAS drives the count negative
 * while a writer holds or waits for the semaphore. */
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;	/* tasks blocked on this rwsem */
#if RWSEM_DEBUG
	int			debug;
#endif
};
/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT	, 0	/* initializer for ->debug */
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

/* Static initializer: unlocked count, unlocked spinlock, empty wait list.
 * Note that __RWSEM_DEBUG_INIT supplies its own leading comma when the
 * debug field exists, so none appears after LIST_HEAD_INIT here. */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/* Contended slow paths, implemented in lib/rwsem.c (see header comment). */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
  49. static inline void init_rwsem(struct rw_semaphore *sem)
  50. {
  51. sem->count = RWSEM_UNLOCKED_VALUE;
  52. spin_lock_init(&sem->wait_lock);
  53. INIT_LIST_HEAD(&sem->wait_list);
  54. #if RWSEM_DEBUG
  55. sem->debug = 0;
  56. #endif
  57. }
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* A positive count after the increment means no writer is active
	 * or queued, so the read lock is ours; otherwise block in the
	 * slow path. */
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		/* NOTE(review): a write barrier as the acquire-side fence
		 * looks weak for a lock -- confirm against the PPC memory
		 * model before relying on it. */
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}
  68. static inline int __down_read_trylock(struct rw_semaphore *sem)
  69. {
  70. int tmp;
  71. while ((tmp = sem->count) >= 0) {
  72. if (tmp == cmpxchg(&sem->count, tmp,
  73. tmp + RWSEM_ACTIVE_READ_BIAS)) {
  74. smp_wmb();
  75. return 1;
  76. }
  77. }
  78. return 0;
  79. }
  80. /*
  81. * lock for writing
  82. */
  83. static inline void __down_write(struct rw_semaphore *sem)
  84. {
  85. int tmp;
  86. tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
  87. (atomic_t *)(&sem->count));
  88. if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
  89. smp_wmb();
  90. else
  91. rwsem_down_write_failed(sem);
  92. }
  93. static inline int __down_write_trylock(struct rw_semaphore *sem)
  94. {
  95. int tmp;
  96. tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  97. RWSEM_ACTIVE_WRITE_BIAS);
  98. smp_wmb();
  99. return tmp == RWSEM_UNLOCKED_VALUE;
  100. }
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();	/* keep critical-section stores before the release */
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	/* We were the last active locker (active half of the count is now
	 * zero) and the count is still below -1, which presumably means
	 * waiters are queued -- wake them. */
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();	/* keep critical-section stores before the release */
	/* Dropping the write bias leaves the count negative only when
	 * other tasks are queued on the semaphore -- wake them. */
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}
  122. /*
  123. * implement atomic add functionality
  124. */
  125. static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  126. {
  127. atomic_add(delta, (atomic_t *)(&sem->count));
  128. }
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();	/* writes done under the write lock stay before this */
	/* Cancel the RWSEM_WAITING_BIAS component that the write bias
	 * added, leaving a single active (read) hold. */
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)	/* still negative => other tasks are queued */
		rwsem_downgrade_wake(sem);
}
  140. /*
  141. * implement exchange and add functionality
  142. */
  143. static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
  144. {
  145. smp_mb();
  146. return atomic_add_return(delta, (atomic_t *)(&sem->count));
  147. }
#endif /* __KERNEL__ */
#endif /* _PPC_RWSEM_H */