/*
 * rwsem.h (3.7 KB) -- extracted source listing; the per-line number
 * gutter from the original rendering has been removed.
 */
  1. #ifndef _ASM_POWERPC_RWSEM_H
  2. #define _ASM_POWERPC_RWSEM_H
  3. #ifdef __KERNEL__
  4. /*
  5. * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
  6. * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
  7. * by Paul Mackerras <paulus@samba.org>.
  8. */
  9. #include <linux/list.h>
  10. #include <linux/spinlock.h>
  11. #include <asm/atomic.h>
  12. #include <asm/system.h>
/*
 * the semaphore definition
 *
 * All lock state lives in a single signed 32-bit `count' word, driven
 * with atomic ops by the helpers below:
 *   RWSEM_UNLOCKED_VALUE    -- 0: no lockers, no waiters
 *   RWSEM_ACTIVE_BIAS       -- added once per active locker
 *   RWSEM_ACTIVE_MASK       -- low 16 bits: number of active lockers
 *   RWSEM_WAITING_BIAS      -- negative bias in the high half while
 *                              tasks are queued on wait_list
 *   RWSEM_ACTIVE_WRITE_BIAS -- a writer counts as one active locker
 *                              plus the waiting bias, so any concurrent
 *                              reader/writer drives count negative
 * wait_lock serializes the slow paths' access to wait_list.
 */
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t -- paulus */
	signed int count;
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;		/* protects wait_list */
	struct list_head wait_list;	/* queue of sleeping tasks */
#if RWSEM_DEBUG
	int debug;
#endif
};
/*
 * initialisation
 *
 * __RWSEM_DEBUG_INIT supplies the trailing `debug' member initializer
 * (note: its expansion includes the leading comma) when RWSEM_DEBUG is
 * set, and expands to nothing otherwise.
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT , 0
#else
#define __RWSEM_DEBUG_INIT /* */
#endif

/* Static initializer: unlocked count, unlocked spinlock, empty queue. */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEBUG_INIT }

/* Define and statically initialize a semaphore in one go. */
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Contention slow paths, implemented in lib/rwsem.c (see file header). */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
  49. static inline void init_rwsem(struct rw_semaphore *sem)
  50. {
  51. sem->count = RWSEM_UNLOCKED_VALUE;
  52. spin_lock_init(&sem->wait_lock);
  53. INIT_LIST_HEAD(&sem->wait_list);
  54. #if RWSEM_DEBUG
  55. sem->debug = 0;
  56. #endif
  57. }
  58. /*
  59. * lock for reading
  60. */
  61. static inline void __down_read(struct rw_semaphore *sem)
  62. {
  63. if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
  64. rwsem_down_read_failed(sem);
  65. }
  66. static inline int __down_read_trylock(struct rw_semaphore *sem)
  67. {
  68. int tmp;
  69. while ((tmp = sem->count) >= 0) {
  70. if (tmp == cmpxchg(&sem->count, tmp,
  71. tmp + RWSEM_ACTIVE_READ_BIAS)) {
  72. return 1;
  73. }
  74. }
  75. return 0;
  76. }
  77. /*
  78. * lock for writing
  79. */
  80. static inline void __down_write(struct rw_semaphore *sem)
  81. {
  82. int tmp;
  83. tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
  84. (atomic_t *)(&sem->count));
  85. if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
  86. rwsem_down_write_failed(sem);
  87. }
  88. static inline int __down_write_trylock(struct rw_semaphore *sem)
  89. {
  90. int tmp;
  91. tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  92. RWSEM_ACTIVE_WRITE_BIAS);
  93. return tmp == RWSEM_UNLOCKED_VALUE;
  94. }
/*
 * unlock after reading
 *
 * Drop our reader bias.  Wake waiters only when both hold for the
 * post-decrement value: it is below -1 (waiting bias present, i.e.
 * tasks are queued) AND the active mask is zero (we were the last
 * active locker).
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	/* tmp is the count value after removing our RWSEM_ACTIVE_BIAS */
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}
  105. /*
  106. * unlock after writing
  107. */
  108. static inline void __up_write(struct rw_semaphore *sem)
  109. {
  110. if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
  111. (atomic_t *)(&sem->count)) < 0))
  112. rwsem_wake(sem);
  113. }
  114. /*
  115. * implement atomic add functionality
  116. */
  117. static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  118. {
  119. atomic_add(delta, (atomic_t *)(&sem->count));
  120. }
/*
 * downgrade write lock to read lock
 *
 * Cancel the waiting-bias half of our write bias: since
 * RWSEM_ACTIVE_WRITE_BIAS == RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS,
 * adding -RWSEM_WAITING_BIAS leaves exactly one reader bias held by us.
 * If the result is still negative, other tasks are queued and can now
 * be woken to read alongside us.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
  131. /*
  132. * implement exchange and add functionality
  133. */
  134. static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
  135. {
  136. return atomic_add_return(delta, (atomic_t *)(&sem->count));
  137. }
  138. static inline int rwsem_is_locked(struct rw_semaphore *sem)
  139. {
  140. return (sem->count != 0);
  141. }
  142. #endif /* __KERNEL__ */
  143. #endif /* _ASM_POWERPC_RWSEM_H */