#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 *  S390 version
 *    Copyright IBM Corp. 2002
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of
 * an uncontended lock. Whether the lock was contended can be determined
 * because XADD returns the old value. Readers increment by 1 and see a
 * positive value when uncontended, or a negative value if there are writers
 * (and possibly other readers) waiting, in which case the reader goes to
 * sleep. (A worked example of the count values follows the bias definitions
 * below.)
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifndef CONFIG_64BIT
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

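/*
 * Worked example of the count encoding (!CONFIG_64BIT layout; this note
 * is an editorial sketch, not part of the original comment):
 *
 *	0x00000000	unlocked (RWSEM_UNLOCKED_VALUE)
 *	0x00000001	one reader active (+RWSEM_ACTIVE_READ_BIAS)
 *	0x00000003	three readers active
 *	0xffff0001	one writer active (+RWSEM_ACTIVE_WRITE_BIAS,
 *			i.e. -0x00010000 + 0x00000001)
 *	0xfffe0001	one writer active, one locker waiting
 *	0xffff0000	a locker is waiting but nothing is active any more;
 *			this is the state in which the unlock paths call
 *			rwsem_wake()
 */
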
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ahi	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	aghi	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	if (old < 0)
		rwsem_down_read_failed(sem);
}

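/*
 * Editorial note, not part of the original source: the inline assembly
 * above implements a load / add / compare-and-swap retry loop. A rough
 * C equivalent, sketched with a GCC __sync builtin purely for
 * illustration, would be:
 *
 *	signed long old, new;
 *
 *	do {
 *		old = sem->count;
 *		new = old + RWSEM_ACTIVE_READ_BIAS;
 *	} while (__sync_val_compare_and_swap(&sem->count, old, new) != old);
 *	if (old < 0)	/- MSW non-zero: writers active or waiting -/
 *		rwsem_down_read_failed(sem);
 *
 * The same pattern, with different addends, underlies all of the
 * functions below.
 */
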
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	ltr	%1,%0\n"
		"	jm	1f\n"
		"	ahi	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b\n"
		"1:"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	ltgr	%1,%0\n"
		"	jm	1f\n"
		"	aghi	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		"1:"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	a	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (old != 0)
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

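/*
 * Editorial note, not part of the original source: the subclass argument
 * exists to match the generic rwsem API, where it feeds lockdep's lock
 * nesting annotations; this architecture fast path does not use it.
 */
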
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%1\n"
		"0:	ltr	%0,%0\n"
		"	jnz	1f\n"
		"	cs	%0,%3,%1\n"
		"	jl	0b\n"
#else /* CONFIG_64BIT */
		"	lg	%0,%1\n"
		"0:	ltgr	%0,%0\n"
		"	jnz	1f\n"
		"	csg	%0,%3,%1\n"
		"	jl	0b\n"
#endif /* CONFIG_64BIT */
		"1:"
		: "=&d" (old), "=Q" (sem->count)
		: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
		: "cc", "memory");
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

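/*
 * Editorial note, not part of the original source: unlike the read
 * trylock, which succeeds for any non-negative count, the write trylock
 * attempts the compare-and-swap only when the count is exactly
 * RWSEM_UNLOCKED_VALUE - a writer may take the lock only if nothing is
 * active or waiting.
 */
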
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ahi	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	aghi	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

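/*
 * Editorial note, not part of the original source: after the decrement,
 * new < 0 means the MSW is non-zero, i.e. lockers are still queued, and
 * a zero RWSEM_ACTIVE_MASK part means this was the last active lock.
 * Only when both hold is there a sleeper to hand the lock to, hence the
 * nested test before rwsem_wake(). __up_write() below applies the same
 * test after removing the write bias.
 */
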
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	a	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	a	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new > 1)
		rwsem_downgrade_wake(sem);
}

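/*
 * Editorial note, not part of the original source: adding
 * -RWSEM_WAITING_BIAS converts the writer's RWSEM_ACTIVE_WRITE_BIAS
 * contribution into a plain RWSEM_ACTIVE_READ_BIAS; for an uncontended
 * writer, 0xffff0001 + 0x00010000 = 0x00000001, i.e. one active reader.
 * A result above 1 indicates that further counts are folded in, and
 * rwsem_downgrade_wake() is called to wake readers queued behind the
 * former writer.
 */
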
/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	agr	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%4\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	agr	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
	return new;
}

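/*
 * Editorial note, not part of the original source: rwsem_atomic_add()
 * and rwsem_atomic_update() are the hooks the generic slow path in
 * lib/rwsem.c uses to adjust the count while it manipulates the wait
 * list; rwsem_atomic_update() additionally returns the new count so the
 * caller can decide whom to wake.
 */
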
#endif /* _S390_RWSEM_H */