#ifndef _ALPHA_RWSEM_H
#define _ALPHA_RWSEM_H

/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/compiler.h>

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};
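
/*
 * Layout of 'count' (implied by the bias values above): the low 32 bits
 * hold the number of active lockers, while the upper 32 bits go negative
 * (by RWSEM_WAITING_BIAS) while a writer holds the lock or waiters are
 * queued on wait_list.  For example:
 *
 *	0x0000000000000000	unlocked
 *	0x0000000000000001	one active reader, nobody waiting
 *	0xffffffff00000001	one active writer (WAITING_BIAS + ACTIVE_BIAS)
 *
 * A single atomic 64-bit add is therefore enough for the fast paths below,
 * which detect contention from the sign and low word of the old count.
 */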

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
	LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
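
/*
 * Illustration only (not part of this header): the __ helpers below
 * implement just the fast paths; code takes the lock through the generic
 * wrappers in <linux/rwsem.h>, which fall back to the extern slow-path
 * functions declared above on contention.  A hypothetical user would look
 * like:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	fast path: __down_read()
 *	...read shared data...
 *	up_read(&example_sem);		fast path: __up_read()
 *
 * 'example_sem' is a made-up name used only for this sketch.
 */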

static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
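
/*
 * The SMP path above is the usual Alpha load-locked/store-conditional
 * loop: ldq_l loads 'count', addq builds count + RWSEM_ACTIVE_READ_BIAS in
 * a scratch register, and stq_c tries to store it back, leaving 0 in the
 * scratch register (so the beq branches to the out-of-line retry at 2:) if
 * another CPU wrote the location in between.  The trailing mb gives the
 * acquire its memory ordering.  A negative old count means a writer is
 * active or waiters are queued, so the slow path is taken.
 */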

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long old, new, res;

	res = sem->count;
	do {
		new = res + RWSEM_ACTIVE_READ_BIAS;
		if (new <= 0)
			break;
		old = res;
		res = cmpxchg(&sem->count, old, new);
	} while (res != old);
	return res >= 0 ? 1 : 0;
}
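
/*
 * cmpxchg() returns the value found in sem->count, so the loop above only
 * retries while other CPUs keep changing the count and the lock still
 * looks readable (new > 0, i.e. no active or pending writer); otherwise it
 * gives up without sleeping and reports failure.
 */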

static inline void __down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount))
		rwsem_down_write_failed(sem);
}
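
/*
 * Same LL/SC pattern as __down_read(), but adding RWSEM_ACTIVE_WRITE_BIAS
 * (WAITING_BIAS + ACTIVE_BIAS).  The write fast path may only succeed on a
 * completely idle semaphore, so any non-zero old count means contention
 * and sends us to rwsem_down_write_failed().
 */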

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
			   RWSEM_ACTIVE_WRITE_BIAS);
	if (ret == RWSEM_UNLOCKED_VALUE)
		return 1;
	return 0;
}
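
/*
 * One cmpxchg() suffices here: a writer may only take the lock when the
 * count is exactly RWSEM_UNLOCKED_VALUE, so there is nothing to retry.
 */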

static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldq_l	%0,%1\n"
	"	subq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
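
/*
 * On release the mb comes first, ordering the critical section before the
 * count update becomes visible.  'oldcount' is the value before the
 * decrement, so a negative count whose low 32 bits equal
 * RWSEM_ACTIVE_READ_BIAS means we were the last active reader while
 * someone is queued, and rwsem_wake() passes the lock on.
 */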

static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef	CONFIG_SMP
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldq_l	%0,%1\n"
	"	subq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	subq	%0,%3,%0\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(count))
		if ((int)count == 0)
			rwsem_wake(sem);
}
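
/*
 * 'count' here is the value after removing the write bias (note the extra
 * subq that only runs on the success path of the stq_c).  A non-zero
 * result with a zero low word means no lockers remain but waiters are
 * queued, so rwsem_wake() is needed.
 */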

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_WAITING_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
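
/*
 * Adding -RWSEM_WAITING_BIAS converts the writer's bias
 * (WAITING_BIAS + ACTIVE_BIAS) into a plain reader bias (ACTIVE_BIAS) in
 * one atomic step; rwsem_downgrade_wake() then admits any readers queued
 * behind the former writer.
 */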

static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%2,%0\n"
	"	stq_c	%0,%1\n"
	"	beq	%0,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}

static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	addq	%0,%3,%0\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));
	return ret;
#endif
}
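
/*
 * rwsem_atomic_add() and rwsem_atomic_update() are not used by the fast
 * paths in this file; they are provided for the generic slow-path code
 * (rwsem_down_read_failed(), rwsem_down_write_failed(), rwsem_wake(),
 * rwsem_downgrade_wake()) to adjust the count while it manipulates
 * wait_list, rwsem_atomic_update() additionally handing back the new count
 * so the caller can decide whom to wake.
 */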

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */