#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        /* XXX this should be able to be an atomic_t -- paulus */
        signed int              count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
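        /*
         * The count packs two fields: the low 16 bits
         * (RWSEM_ACTIVE_MASK) count the active lockers, and the upper
         * bits carry RWSEM_WAITING_BIAS whenever a writer holds the
         * lock or tasks are queued on wait_list.  The resulting
         * states:
         *
         *      0x00000000      unlocked
         *      0x0000000N      N readers active
         *      0xffff0001      one writer active
         *                      (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
         *
         * so count > 0 means readers only, while count < 0 means a
         * writer is active and/or tasks are waiting.
         */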
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
          LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                 \
        do {                                            \
                static struct lock_class_key __key;     \
                                                        \
                __init_rwsem((sem), #sem, &__key);      \
        } while (0)
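
/*
 * The static __key gives each init_rwsem() call site its own
 * lock_class_key, so under CONFIG_DEBUG_LOCK_ALLOC lockdep groups all
 * semaphores initialized at the same place into a single lock class.
 */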

/*
 * lock for reading
 */
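/*
 * Fast path: bump the count by RWSEM_ACTIVE_READ_BIAS.  If the result
 * is still <= 0, the waiting bias is present (a writer holds the lock
 * or tasks are queued), so fall back to the slow path in lib/rwsem.c.
 */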
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
                rwsem_down_read_failed(sem);
}
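
/*
 * Trylock: keep retrying the cmpxchg for as long as the count stays
 * non-negative (no writer active or waiting); give up without
 * blocking as soon as a negative count is observed.
 */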
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
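/*
 * Fast path: add RWSEM_ACTIVE_WRITE_BIAS (the waiting bias plus one
 * active locker).  The result equals RWSEM_ACTIVE_WRITE_BIAS only if
 * the count was zero, i.e. the semaphore was completely unlocked;
 * anything else means contention, so take the slow path.  The
 * subclass argument exists for lockdep nesting annotations and is not
 * needed in this fast path.
 */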
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}
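
/*
 * Trylock: a single cmpxchg from the unlocked state.  Unlike the read
 * trylock there is no retry loop, since a writer can only succeed
 * when the count is exactly RWSEM_UNLOCKED_VALUE.
 */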
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
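/*
 * Drop one reader's RWSEM_ACTIVE_BIAS.  A wakeup is needed only when
 * the result is negative (tasks are queued) and the active mask has
 * drained to zero, i.e. this was the last active locker.
 */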
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
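/*
 * Drop the writer's RWSEM_ACTIVE_WRITE_BIAS.  A negative result means
 * waiters accumulated on the list while the writer held the lock, so
 * hand it off via rwsem_wake().
 */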
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                                       (atomic_t *)(&sem->count)) < 0))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
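/*
 * rwsem_atomic_add() here and rwsem_atomic_update() below are the
 * primitives the generic slow-path code in lib/rwsem.c uses to adjust
 * the count.
 */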
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
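/*
 * Adding -RWSEM_WAITING_BIAS turns the writer's bias
 * (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into a plain
 * RWSEM_ACTIVE_READ_BIAS, i.e. one active reader.  If the count is
 * still negative afterwards, readers are queued and can now run, so
 * wake them via rwsem_downgrade_wake().
 */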
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
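
/*
 * Any nonzero count means the semaphore is held (by readers or a
 * writer) or has waiters queued.
 */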
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_RWSEM_H */