
/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#endif
};
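
/*
 * How the count encodes state (derived from the constants above): the
 * low 16 bits (RWSEM_ACTIVE_MASK) count active lockers, while a writer
 * or queued waiter biases the upper half negative by RWSEM_WAITING_BIAS.
 *
 *	unlocked:	0x00000000
 *	one reader:	0x00000001  (+ RWSEM_ACTIVE_READ_BIAS)
 *	write-locked:	0xffff0001  (+ RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * Hence a reader's increment is positive iff no writer is active or
 * waiting, and a writer's add returns exactly RWSEM_ACTIVE_WRITE_BIAS
 * iff the count was previously RWSEM_UNLOCKED_VALUE.
 */
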
/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT	, 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
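
/*
 * Usage sketch: callers go through the generic wrappers in
 * <linux/rwsem.h>, which invoke the __down_*()/__up_*() fast paths
 * below ("my_sem" is just an illustrative name):
 *
 *	DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	-- shared: many readers may enter
 *	...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);	-- exclusive
 *	...
 *	up_write(&my_sem);
 */
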
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_add_return(1, (atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}
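
/*
 * Note on the fast path above: the unconditional increment leaves the
 * result positive only when the count was non-negative before we added
 * RWSEM_ACTIVE_READ_BIAS, i.e. no writer was active or waiting; any
 * other result falls back to the out-of-line slow path
 * rwsem_down_read_failed() (typically lib/rwsem.c).
 */
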
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}
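
/*
 * The trylock retries the cmpxchg() only while the snapshot stays
 * non-negative: a concurrent reader merely changes the expected value,
 * whereas a writer or waiter drives the count negative and makes the
 * trylock return 0 instead of sleeping.
 */
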
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}
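
/*
 * The result equals RWSEM_ACTIVE_WRITE_BIAS exactly when the old count
 * was RWSEM_UNLOCKED_VALUE (0), i.e. the semaphore was free; any other
 * value means readers, a writer, or waiters were present, so we sleep
 * in rwsem_down_write_failed().
 */
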
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}
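
/*
 * Unlike the unconditional add in __down_write(), a failed cmpxchg()
 * here leaves the count untouched, so the trylock needs no undo step
 * and never enters the slow path.
 */
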
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_sub_return(1, (atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
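
/*
 * After the decrement, a negative result with a zero active count means
 * this was the last active reader and at least one waiter is queued
 * (its RWSEM_WAITING_BIAS keeps the count negative), so rwsem_wake()
 * hands the lock on.
 */
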
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}
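
/*
 * Removing RWSEM_ACTIVE_WRITE_BIAS restores an uncontended semaphore to
 * exactly 0; a negative result means sleepers queued up while we held
 * the lock and must be woken.
 */
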
/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
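
/*
 * Adding -RWSEM_WAITING_BIAS (i.e. +0x00010000) turns the write bias
 * into a plain read bias: an uncontended 0xffff0001 becomes 0x00000001,
 * one active reader. A result that is still negative means waiters
 * remain queued, and rwsem_downgrade_wake() lets waiting readers in
 * behind us.
 */
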
/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
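
/*
 * rwsem_atomic_add() and rwsem_atomic_update() are the hooks the
 * generic slow-path code uses to adjust the count while holding
 * wait_lock; only the latter needs the full barrier and return value.
 */
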
#endif	/* _XTENSA_RWSEM_H */