
#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifdef __KERNEL__

/*
 * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t -- paulus */
	signed int		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};
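
/*
 * A note on the bias scheme (added commentary, not in the original
 * header): count packs two fields into one word.  The low 16 bits
 * (RWSEM_ACTIVE_MASK) count active holders; each sleeping waiter
 * contributes RWSEM_WAITING_BIAS (see lib/rwsem.c).  Example values:
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers hold the lock, no waiters
 *	0xffff0001	one writer holds the lock
 *			(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *	0xffff0002	two readers hold the lock while one task waits
 *
 * This is why "count went non-positive" is the cheap contended-path
 * test used by the fast paths below.
 */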
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
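
/*
 * Example (added commentary): a semaphore can be created statically,
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 * or embedded in a larger object and set up at runtime with
 * init_rwsem(&obj->sem).  "my_sem" and "obj" are hypothetical names.
 */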
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* A non-positive result means the lock is write-held or has waiters. */
	if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
		rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/* Retry the cmpxchg as long as no writer holds or waits. */
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	/* Any value other than our own bias means we were not alone. */
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/* Succeeds only if the semaphore was completely unlocked. */
	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	/* Wake waiters if we were the last active locker and some are queued. */
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	/* A negative result means tasks are still queued waiting. */
	if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
				       (atomic_t *)(&sem->count)) < 0))
		rwsem_wake(sem);
}
/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	/*
	 * Adding -RWSEM_WAITING_BIAS turns our RWSEM_ACTIVE_WRITE_BIAS
	 * into RWSEM_ACTIVE_READ_BIAS; a negative result means other
	 * tasks are still queued and should be woken.
	 */
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}
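
/*
 * Usage sketch (added commentary, not part of the original header).
 * Kernel code does not call these __-prefixed primitives directly; it
 * uses the generic wrappers from <linux/rwsem.h>, which map onto them
 * roughly like this:
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);		-> __down_read(), shared access
 *	... read shared data ...
 *	up_read(&my_sem);		-> __up_read()
 *
 *	down_write(&my_sem);		-> __down_write(), exclusive access
 *	... modify shared data ...
 *	downgrade_write(&my_sem);	-> __downgrade_write()
 *	up_read(&my_sem);		-> release the downgraded lock
 *
 * "my_sem" is a hypothetical example name.
 */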
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_RWSEM_H */