rwsem.h
  1. /*
  2. * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
  3. * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
  4. * by Paul Mackerras <paulus@samba.org>.
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #ifndef _PPC64_RWSEM_H
  12. #define _PPC64_RWSEM_H
  13. #ifdef __KERNEL__
  14. #include <linux/list.h>
  15. #include <linux/spinlock.h>
  16. #include <asm/atomic.h>
  17. #include <asm/system.h>
/*
 * the semaphore definition
 */
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t -- paulus */
	/*
	 * Bias-encoded state: the low 16 bits (RWSEM_ACTIVE_MASK) count
	 * active holders, the upper bits go negative by RWSEM_WAITING_BIAS
	 * while tasks are waiting.
	 */
	signed int count;
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;		/* serializes access to wait_list */
	struct list_head wait_list;	/* tasks blocked on this rwsem */
#if RWSEM_DEBUG
	int debug;
#endif
};
/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT      /* */
#endif

/* Static initializer: unlocked count, unlocked spinlock, empty wait list. */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Contended slow paths, implemented in lib/rwsem.c. */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
  54. static inline void init_rwsem(struct rw_semaphore *sem)
  55. {
  56. sem->count = RWSEM_UNLOCKED_VALUE;
  57. spin_lock_init(&sem->wait_lock);
  58. INIT_LIST_HEAD(&sem->wait_list);
  59. #if RWSEM_DEBUG
  60. sem->debug = 0;
  61. #endif
  62. }
  63. /*
  64. * lock for reading
  65. */
  66. static inline void __down_read(struct rw_semaphore *sem)
  67. {
  68. if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
  69. rwsem_down_read_failed(sem);
  70. }
  71. static inline int __down_read_trylock(struct rw_semaphore *sem)
  72. {
  73. int tmp;
  74. while ((tmp = sem->count) >= 0) {
  75. if (tmp == cmpxchg(&sem->count, tmp,
  76. tmp + RWSEM_ACTIVE_READ_BIAS)) {
  77. return 1;
  78. }
  79. }
  80. return 0;
  81. }
  82. /*
  83. * lock for writing
  84. */
  85. static inline void __down_write(struct rw_semaphore *sem)
  86. {
  87. int tmp;
  88. tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
  89. (atomic_t *)(&sem->count));
  90. if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
  91. rwsem_down_write_failed(sem);
  92. }
  93. static inline int __down_write_trylock(struct rw_semaphore *sem)
  94. {
  95. int tmp;
  96. tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  97. RWSEM_ACTIVE_WRITE_BIAS);
  98. return tmp == RWSEM_UNLOCKED_VALUE;
  99. }
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	/*
	 * Wake waiters only when this was the last active holder (active
	 * mask now zero) and the waiting bias is present (tmp < -1 after
	 * the decrement).
	 */
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}
  110. /*
  111. * unlock after writing
  112. */
  113. static inline void __up_write(struct rw_semaphore *sem)
  114. {
  115. if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
  116. (atomic_t *)(&sem->count)) < 0))
  117. rwsem_wake(sem);
  118. }
  119. /*
  120. * implement atomic add functionality
  121. */
  122. static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  123. {
  124. atomic_add(delta, (atomic_t *)(&sem->count));
  125. }
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	/*
	 * Trade the write bias for a single read bias: the held count is
	 * RWSEM_ACTIVE_WRITE_BIAS (= RWSEM_WAITING_BIAS + 1), so adding
	 * -RWSEM_WAITING_BIAS leaves exactly one active-reader bias.
	 */
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	/* Still negative means other tasks are queued; wake pending readers. */
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
  136. /*
  137. * implement exchange and add functionality
  138. */
  139. static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
  140. {
  141. return atomic_add_return(delta, (atomic_t *)(&sem->count));
  142. }
#endif /* __KERNEL__ */
#endif /* _PPC64_RWSEM_H */