#ifndef _ALPHA_RWSEM_H
#define _ALPHA_RWSEM_H

/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
/*
 * the semaphore definition
 */
struct rw_semaphore {
        long                    count;
#define RWSEM_UNLOCKED_VALUE            0x0000000000000000L
#define RWSEM_ACTIVE_BIAS               0x0000000000000001L
#define RWSEM_ACTIVE_MASK               0x00000000ffffffffL
#define RWSEM_WAITING_BIAS              (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};
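
/*
 * count encoding (the same scheme as the i386 rwsem this is based on):
 * the low 32 bits count active lockers, the high 32 bits go negative
 * while tasks are waiting.  Each reader adds RWSEM_ACTIVE_READ_BIAS
 * (+1); a writer adds RWSEM_ACTIVE_WRITE_BIAS (-2^32 + 1), leaving the
 * active half at 1 and the whole count negative.  So count == 0 means
 * idle, count > 0 means readers only, and count < 0 means a writer is
 * active and/or waiters are queued.
 */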
#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
        LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
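
/*
 * Illustrative only: a minimal sketch of how callers use this, always
 * through the linux/rwsem.h wrappers rather than the __-prefixed
 * primitives below.  The name `my_rwsem` is hypothetical.
 *
 *      static DECLARE_RWSEM(my_rwsem);         // static initialization
 *
 *      down_read(&my_rwsem);                   // shared access
 *      ... read the protected data ...
 *      up_read(&my_rwsem);
 *
 *      down_write(&my_rwsem);                  // exclusive access
 *      ... modify the protected data ...
 *      up_write(&my_rwsem);
 *
 * A dynamically allocated semaphore is set up with init_rwsem() before
 * first use.
 */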
static inline void __down_read(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount < 0))
                rwsem_down_read_failed(sem);
}
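
/*
 * The SMP path above is Alpha's load-locked/store-conditional pattern:
 * ldq_l loads count and arms the lock flag, and stq_c stores the
 * updated value only if nothing else touched that location in between,
 * writing 0 into its source register on failure so the beq branches to
 * the out-of-line retry in .subsection 2.  The trailing mb is the
 * acquire barrier.  If the pre-add count was negative, a writer holds
 * the lock or waiters are queued, and we take the generic slow path
 * via rwsem_down_read_failed().
 */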
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long old, new, res;

        res = sem->count;
        do {
                new = res + RWSEM_ACTIVE_READ_BIAS;
                if (new <= 0)
                        break;
                old = res;
                res = cmpxchg(&sem->count, old, new);
        } while (res != old);
        return res >= 0 ? 1 : 0;
}
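
/*
 * Note the trylock never blocks: cmpxchg() returns the value it found,
 * so the loop retries only while other CPUs race the update, and bails
 * out (returning 0) as soon as the observed count says acquiring the
 * read lock would have to wait.
 */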
static inline void __down_write(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount))
                rwsem_down_write_failed(sem);
}
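
/*
 * A writer can only take the fast path if the semaphore was completely
 * idle: any nonzero pre-add count (active readers, an active writer,
 * or queued waiters) sends us to rwsem_down_write_failed().
 */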
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                           RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
}
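
/*
 * A single cmpxchg() suffices here: unlike the read trylock, a write
 * trylock can only succeed from the fully idle state, so there is
 * nothing to retry.
 */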
static inline void __up_read(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "       mb\n"
        "1:     ldq_l   %0,%1\n"
        "       subq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount < 0))
                if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
                        rwsem_wake(sem);
}
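
/*
 * Here the mb comes before the update (release ordering), the mirror
 * image of the acquire barrier in __down_read().  The wake test reads:
 * the count was negative (waiters are queued) and the low 32 bits
 * after our decrement are zero, i.e. we were the last active reader,
 * so someone has to hand the lock to the waiters.
 */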
static inline void __up_write(struct rw_semaphore *sem)
{
        long count;
#ifndef CONFIG_SMP
        sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
        count = sem->count;
#else
        long temp;
        __asm__ __volatile__(
        "       mb\n"
        "1:     ldq_l   %0,%1\n"
        "       subq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       subq    %0,%3,%0\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (count), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(count))
                if ((int)count == 0)
                        rwsem_wake(sem);
}
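
/*
 * Unlike __up_read(), the wake test here needs the *new* count, which
 * is why the SMP path repeats the subq into %0 after the successful
 * stq_c: a nonzero count whose low half is zero means no lockers
 * remain but the waiting bias is still in place, so waiters must be
 * woken.
 */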
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count -= RWSEM_WAITING_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount < 0))
                rwsem_downgrade_wake(sem);
}
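
/*
 * Subtracting RWSEM_WAITING_BIAS turns the writer's bias
 * (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into a plain reader bias
 * (RWSEM_ACTIVE_BIAS) in one atomic step.  rwsem_downgrade_wake() then
 * lets any readers queued behind the writer in; it rechecks wait_list
 * under wait_lock, so the call is safe even if nobody is waiting.
 */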
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
        sem->count += val;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%2,%0\n"
        "       stq_c   %0,%1\n"
        "       beq     %0,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (temp), "=m" (sem->count)
        :"Ir" (val), "m" (sem->count));
#endif
}
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
        sem->count += val;
        return sem->count;
#else
        long ret, temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       addq    %0,%3,%0\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
        :"Ir" (val), "m" (sem->count));
        return ret;
#endif
}
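
/*
 * rwsem_atomic_add() and rwsem_atomic_update() are the hooks the
 * generic rwsem slow paths (lib/rwsem.c) use to adjust the count while
 * manipulating wait_list; update additionally returns the new count.
 * Neither carries an mb, presumably because the slow paths run under
 * wait_lock and the ordering guarantees live in the fast paths above.
 */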
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */