/* rwsem.h — Alpha read/write semaphore primitives */
  1. #ifndef _ALPHA_RWSEM_H
  2. #define _ALPHA_RWSEM_H
  3. /*
  4. * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
  5. * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
  6. */
  7. #ifndef _LINUX_RWSEM_H
  8. #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
  9. #endif
  10. #ifdef __KERNEL__
  11. #include <linux/compiler.h>
  12. #include <linux/list.h>
  13. #include <linux/spinlock.h>
  14. struct rwsem_waiter;
  15. extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
  16. extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
  17. extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
  18. extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
/*
 * the semaphore definition
 */
struct rw_semaphore {
	long count;			/* bias-encoded lock state, see below */
/*
 * Count encoding (64-bit): the low 32 bits (ACTIVE_MASK) count active
 * lock holders — each holder adds ACTIVE_BIAS.  Each waiter adds
 * WAITING_BIAS to the high half, which drives the count negative.
 * A writer therefore adds WAITING_BIAS + ACTIVE_BIAS.
 */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;		/* protects wait_list */
	struct list_head wait_list;	/* tasks blocked on this rwsem */
#if RWSEM_DEBUG
	int debug;			/* debug-build sanity field */
#endif
};
/* Initializer fragment for the optional trailing debug field. */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

/* Static initializer: unlocked count, released spinlock, empty wait queue. */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }

/* Define and statically initialize a rw_semaphore in one declaration. */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
  46. static inline void init_rwsem(struct rw_semaphore *sem)
  47. {
  48. sem->count = RWSEM_UNLOCKED_VALUE;
  49. spin_lock_init(&sem->wait_lock);
  50. INIT_LIST_HEAD(&sem->wait_list);
  51. #if RWSEM_DEBUG
  52. sem->debug = 0;
  53. #endif
  54. }
/*
 * Acquire for reading: atomically add the reader bias.  If the count
 * was already negative (a writer is active or tasks are queued), fall
 * into the slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;			/* count value before our increment */
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * LL/SC retry loop: branch to the out-of-line "2:" stub and
	 * reload whenever the store-conditional fails.  The trailing
	 * "mb" orders the acquire before later accesses to protected
	 * data.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long old, new, res;
	res = sem->count;
	do {
		new = res + RWSEM_ACTIVE_READ_BIAS;	/* candidate new count */
		if (new <= 0)	/* writer active or waiters queued: give up */
			break;
		old = res;
		/* cmpxchg returns the value actually found in count */
		res = cmpxchg(&sem->count, old, new);
	} while (res != old);	/* lost a race: retry from the fresh value */
	/* res >= 0 only when our cmpxchg installed a positive count */
	return res >= 0 ? 1 : 0;
}
/*
 * Acquire for writing: atomically add the writer bias
 * (WAITING_BIAS + ACTIVE_BIAS).  Uncontended only if the old count
 * was zero; any other value sends us to the slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long oldcount;			/* count value before our addition */
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	/*
	 * LL/SC retry loop, same shape as __down_read(); "mb" makes
	 * this an acquire operation.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount))
		rwsem_down_write_failed(sem);
}
  117. /*
  118. * trylock for writing -- returns 1 if successful, 0 if contention
  119. */
  120. static inline int __down_write_trylock(struct rw_semaphore *sem)
  121. {
  122. long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  123. RWSEM_ACTIVE_WRITE_BIAS);
  124. if (ret == RWSEM_UNLOCKED_VALUE)
  125. return 1;
  126. return 0;
  127. }
/*
 * Release a read lock: subtract the reader bias.  If waiters are
 * queued (count negative) and we were the last active holder (the
 * low 32 "active" bits of the new count are zero), wake the queue.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;			/* count value before our decrement */
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/* Leading "mb": order critical-section accesses before release. */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		/* (int) truncation isolates the low 32 active bits */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
/*
 * Release the write lock: subtract the writer bias.  "count" holds
 * the NEW count value; nonzero with the low 32 active bits clear
 * means only waiters remain, so wake them.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;			/* new count value after the release */
#ifndef CONFIG_SMP
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	/*
	 * Leading "mb" orders the critical section before the release;
	 * the second subq recomputes the new count into %0 after a
	 * successful store-conditional (stq_c clobbered %2).
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" subq %0,%3,%0\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(count))
		/* low 32 bits zero: no active holders, only waiters */
		if ((int)count == 0)
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long oldcount;			/* count value before the adjustment */
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_WAITING_BIAS;
#else
	long temp;
	/*
	 * Adds -RWSEM_WAITING_BIAS, turning ACTIVE_WRITE_BIAS into
	 * ACTIVE_READ_BIAS while leaving the waiter bits intact.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	/* Waiters were queued: let the slow path wake whoever can run now. */
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
/*
 * Atomically add "val" to the count.  Note there is no "mb" here,
 * unlike the lock/unlock paths above — ordering is the caller's
 * responsibility.
 */
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
	sem->count += val;
#else
	long temp;
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%2,%0\n"
	" stq_c %0,%1\n"
	" beq %0,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}
/*
 * Atomically add "val" to the count and return the new count value.
 * As with rwsem_atomic_add(), no memory barrier is issued here.
 */
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	/* New value is computed twice: into %2 for stq_c, into %0 to return. */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" addq %0,%3,%0\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));
	return ret;
#endif
}
  242. static inline int rwsem_is_locked(struct rw_semaphore *sem)
  243. {
  244. return (sem->count != 0);
  245. }
  246. #endif /* __KERNEL__ */
  247. #endif /* _ALPHA_RWSEM_H */