#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 *  include/asm-s390/rwsem.h
 *
 *  S390 version
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */
/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of
 * an uncontended lock. This can be determined because XADD returns the old
 * value. Readers increment by 1 and see a positive value when uncontended,
 * or a negative value if there are writers (and possibly readers) waiting,
 * in which case the reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants
 * a lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
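/*
 * Worked example of the encoding above (illustrative only, shown for the
 * 31-bit layout; the 64-bit layout below simply widens each halfword of the
 * count to a word):
 *
 *   unlocked:                        count = 0x00000000
 *   one active reader:               count = 0x00000001  (ACTIVE_BIAS)
 *   two active readers:              count = 0x00000002
 *   one active writer, uncontended:  count = 0xffff0001  (WAITING_BIAS +
 *                                                         ACTIVE_BIAS)
 *
 * Each locker that goes to sleep additionally contributes WAITING_BIAS, so
 * the MSW becomes more negative by one per waiter.
 */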
#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};
#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * initialisation
 */
#define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
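/*
 * Usage sketch (not part of this header; "my_sem" is a made-up name):
 *
 *	static DECLARE_RWSEM(my_sem);		- static initialisation
 * or
 *	struct rw_semaphore my_sem;
 *	init_rwsem(&my_sem);			- run-time initialisation
 *
 * Callers normally go through the down_read()/up_read()/down_write()/
 * up_write() wrappers in linux/rwsem.h rather than using the __-prefixed
 * primitives below directly.
 */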
/*
 * lock for reading
 */
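/*
 * All of the inline primitives below share the same pattern: load the count,
 * compute the updated value in a second register, and commit it with
 * COMPARE AND SWAP (cs/csg); "jl 0b" retries if another CPU changed the
 * count in between. Here the count is incremented by ACTIVE_READ_BIAS; a
 * negative old value means a writer is active or queued, so the reader
 * takes the slow path via rwsem_down_read_failed().
 */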
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ahi  %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   aghi %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	if (old < 0)
		rwsem_down_read_failed(sem);
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
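/*
 * Same CS loop, but "jm 1f" (jump if minus) bails out without touching the
 * count when it is already negative, i.e. when a writer is active or queued.
 * Success is reported when the count seen before the update was
 * non-negative (old >= 0).
 */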
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: ltr  %1,%0\n"
		"   jm   1f\n"
		"   ahi  %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b\n"
		"1:"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: ltgr %1,%0\n"
		"   jm   1f\n"
		"   aghi %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b\n"
		"1:"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	return old >= 0 ? 1 : 0;
}
/*
 * lock for writing
 */
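/*
 * A writer adds ACTIVE_WRITE_BIAS (WAITING_BIAS + ACTIVE_BIAS). The bias is
 * added from storage (a/ag on "tmp") rather than as an immediate, since it
 * does not fit the 16-bit signed immediate of ahi/aghi. A non-zero old
 * count means the lock was not free, so the writer takes the slow path via
 * rwsem_down_write_failed().
 */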
static inline void __down_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   a    %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   ag   %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (old != 0)
		rwsem_down_write_failed(sem);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
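/*
 * The write trylock only succeeds if the count was exactly
 * RWSEM_UNLOCKED_VALUE: "jnz 1f" gives up as soon as a non-zero count is
 * seen, otherwise ACTIVE_WRITE_BIAS is swapped in atomically.
 */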
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%2)\n"
		"0: ltr  %0,%0\n"
		"   jnz  1f\n"
		"   cs   %0,%4,0(%2)\n"
		"   jl   0b\n"
#else /* __s390x__ */
		"   lg   %0,0(%2)\n"
		"0: ltgr %0,%0\n"
		"   jnz  1f\n"
		"   csg  %0,%4,0(%2)\n"
		"   jl   0b\n"
#endif /* __s390x__ */
		"1:"
		: "=&d" (old), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" );
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}
/*
 * unlock after reading
 */
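/*
 * Dropping a read lock subtracts ACTIVE_READ_BIAS. If the resulting count
 * is negative there are queued lockers; once the active part (ACTIVE_MASK)
 * has reached zero the last reader out calls rwsem_wake() to wake them.
 */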
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ahi  %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   aghi %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
/*
 * unlock after writing
 */
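/*
 * Releasing a write lock adds -ACTIVE_WRITE_BIAS (again via "tmp" in
 * storage, as the value exceeds the ahi/aghi immediate range), dropping the
 * active count to zero; a still-negative result means lockers are queued,
 * so rwsem_wake() hands the lock on.
 */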
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   a    %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   ag   %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 */
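/*
 * Adding -WAITING_BIAS turns the writer's ACTIVE_WRITE_BIAS into a plain
 * ACTIVE_BIAS, i.e. the caller keeps the semaphore as a reader.
 * rwsem_downgrade_wake() then lets any readers queued behind the former
 * writer in as well.
 */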
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   a    %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   ag   %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new > 1)
		rwsem_downgrade_wake(sem);
}
/*
 * implement atomic add functionality
 */
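/*
 * Helper for the generic slow-path code in lib/rwsem.c: atomically add an
 * arbitrary delta to the count when the caller does not need the resulting
 * value.
 */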
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ar   %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   agr  %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
}
/*
 * implement exchange and add functionality
 */
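/*
 * Like rwsem_atomic_add(), but returns the updated count (note: the new
 * value, not the old one as a bare XADD would), which the slow-path code
 * uses to decide whom to wake.
 */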
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ar   %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   agr  %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
	return new;
}
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */