  1. #ifndef _S390_RWSEM_H
  2. #define _S390_RWSEM_H
  3. /*
  4. * include/asm-s390/rwsem.h
  5. *
  6. * S390 version
  7. * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
  8. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  9. *
  10. * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
  11. */
  12. /*
  13. *
  14. * The MSW of the count is the negated number of active writers and waiting
  15. * lockers, and the LSW is the total number of active locks
  16. *
  17. * The lock count is initialized to 0 (no active and no waiting lockers).
  18. *
  19. * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
  20. * uncontended lock. This can be determined because XADD returns the old value.
  21. * Readers increment by 1 and see a positive value when uncontended, negative
* if there are writers (and maybe readers) waiting (in which case it goes to
  23. * sleep).
  24. *
  25. * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
  26. * be extended to 65534 by manually checking the whole MSW rather than relying
  27. * on the S flag.
  28. *
  29. * The value of ACTIVE_BIAS supports up to 65535 active processes.
  30. *
  31. * This should be totally fair - if anything is waiting, a process that wants a
  32. * lock will go to the back of the queue. When the currently active lock is
  33. * released, if there's a writer at the front of the queue, then that and only
* that will be woken up; if there's a bunch of consecutive readers at the
  35. * front, then they'll all be woken up, but no other readers will be.
  36. */
  37. #ifndef _LINUX_RWSEM_H
  38. #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
  39. #endif
  40. #ifdef __KERNEL__
  41. #include <linux/list.h>
  42. #include <linux/spinlock.h>
  43. struct rwsem_waiter;
  44. extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
  45. extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
  46. extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
  47. extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
  48. extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
  49. /*
  50. * the semaphore definition
  51. */
struct rw_semaphore {
	signed long count;		/* bias-encoded lock state, see comment at top of file */
	spinlock_t wait_lock;		/* presumably protects wait_list — taken by the out-of-line slow paths */
	struct list_head wait_list;	/* queue of waiting lockers (FIFO per the fairness note above) */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep bookkeeping */
#endif
};
  60. #ifndef __s390x__
  61. #define RWSEM_UNLOCKED_VALUE 0x00000000
  62. #define RWSEM_ACTIVE_BIAS 0x00000001
  63. #define RWSEM_ACTIVE_MASK 0x0000ffff
  64. #define RWSEM_WAITING_BIAS (-0x00010000)
  65. #else /* __s390x__ */
  66. #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
  67. #define RWSEM_ACTIVE_BIAS 0x0000000000000001L
  68. #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
  69. #define RWSEM_WAITING_BIAS (-0x0000000100000000L)
  70. #endif /* __s390x__ */
  71. #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
  72. #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
  73. /*
  74. * initialisation
  75. */
  76. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  77. # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
  78. #else
  79. # define __RWSEM_DEP_MAP_INIT(lockname)
  80. #endif
  81. #define __RWSEM_INITIALIZER(name) \
  82. { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
  83. LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
  84. #define DECLARE_RWSEM(name) \
  85. struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time initialisation to the unlocked state: zero count, no queued
 * waiters.  NOTE: the init_rwsem() macro defined further down shadows
 * this inline function, so the macro variant is what call sites expand.
 */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
/* out-of-line initialiser that also takes a lockdep class key */
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * Each init_rwsem() call site gets its own static lock_class_key so
 * lockdep can distinguish semaphores initialised at different places;
 * #sem supplies the human-readable lock name.
 */
#define init_rwsem(sem) \
do { \
	static struct lock_class_key __key; \
\
	__init_rwsem((sem), #sem, &__key); \
} while (0)
  100. /*
  101. * lock for reading
  102. */
/*
 * Take the lock for reading: atomically add RWSEM_ACTIVE_READ_BIAS to
 * sem->count with a compare-and-swap retry loop ("jl 0b" branches back
 * while the cs/csg condition code reports a lost race).  A negative old
 * count means a writer is active or queued (see the bias layout at the
 * top of this file), so we must block in the slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" ahi %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" aghi %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	/* old < 0: writer active or waiting — go to sleep */
	if (old < 0)
		rwsem_down_read_failed(sem);
}
  126. /*
  127. * trylock for reading -- returns 1 if successful, 0 if contention
  128. */
/*
 * Try to take the lock for reading without blocking.  "jm 1f" (jump on
 * minus) bails out as soon as a negative count is observed — i.e. a
 * writer is active or queued — otherwise the cs/csg loop adds
 * RWSEM_ACTIVE_READ_BIAS.  Returns 1 on success, 0 on contention.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: ltr %1,%0\n"
		" jm 1f\n"
		" ahi %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b\n"
		"1:"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: ltgr %1,%0\n"
		" jm 1f\n"
		" aghi %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b\n"
		"1:"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	/* success iff the last count we observed was non-negative */
	return old >= 0 ? 1 : 0;
}
  155. /*
  156. * lock for writing
  157. */
/*
 * Take the lock for writing: atomically add RWSEM_ACTIVE_WRITE_BIAS
 * (waiting bias + active bias) with a cs/csg retry loop.  Only an old
 * count of exactly 0 means the lock was uncontended; anything else
 * sends us to the slow path.
 *
 * @subclass is the lockdep nesting level; it is unused by this
 * implementation (the asm never references it).
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	signed long old, new, tmp;

	/* bias kept in memory: it does not fit an ahi/aghi immediate,
	   hence the "m" constraint and a/ag instead */
	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" a %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" ag %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	/* nonzero old count: readers or another writer — block */
	if (old != 0)
		rwsem_down_write_failed(sem);
}
/* lock for writing at the outermost (subclass 0) nesting level */
static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
  186. /*
  187. * trylock for writing -- returns 1 if successful, 0 if contention
  188. */
/*
 * Try to take the lock for writing without blocking: the cs/csg is only
 * attempted while the observed count is 0 (completely unlocked);
 * "jnz 1f" gives up as soon as any active locker or waiter shows up.
 * Returns 1 on success, 0 on contention.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	asm volatile(
#ifndef __s390x__
		" l %0,%1\n"
		"0: ltr %0,%0\n"
		" jnz 1f\n"
		" cs %0,%3,%1\n"
		" jl 0b\n"
#else /* __s390x__ */
		" lg %0,%1\n"
		"0: ltgr %0,%0\n"
		" jnz 1f\n"
		" csg %0,%3,%1\n"
		" jl 0b\n"
#endif /* __s390x__ */
		"1:"
		: "=&d" (old), "=Q" (sem->count)
		: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
		: "cc", "memory");
	/* we got it iff we swapped against the unlocked value */
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}
  212. /*
  213. * unlock after reading
  214. */
/*
 * Release a read lock: atomically subtract RWSEM_ACTIVE_READ_BIAS.
 * If the resulting count is negative (lockers are queued) and the
 * active mask has dropped to zero, we were the last active locker and
 * must wake the head of the wait queue.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" ahi %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" aghi %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	/* negative count with no active lockers left: wake waiters */
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
  239. /*
  240. * unlock after writing
  241. */
/*
 * Release a write lock: atomically subtract RWSEM_ACTIVE_WRITE_BIAS
 * (kept in memory, like __down_write_nested, because it does not fit
 * an immediate operand).  If the resulting count is negative (lockers
 * are queued) and the active mask is zero, wake the queue head.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" a %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" ag %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	/* negative count with no active lockers left: wake waiters */
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
  267. /*
  268. * downgrade write lock to read lock
  269. */
/*
 * Downgrade a held write lock to a read lock: adding -RWSEM_WAITING_BIAS
 * turns the writer's bias (WAITING_BIAS + ACTIVE_BIAS) into a plain
 * reader ACTIVE_BIAS.
 *
 * NOTE(review): the "new > 1" wake condition looks suspicious — queued
 * waiters make the new count negative (MSW < 0), a case this test never
 * catches, whereas e.g. x86 wakes on the sign bit here.  Confirm against
 * the generic slow-path accounting before relying on downgrade to wake
 * waiting readers.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" a %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" ag %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new > 1)
		rwsem_downgrade_wake(sem);
}
  294. /*
  295. * implement atomic add functionality
  296. */
/*
 * Atomically add @delta to sem->count via a cs/csg retry loop.
 * Helper for bias adjustments; not called from this file (used by the
 * out-of-line slow paths).
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" ar %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" agr %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
}
  318. /*
  319. * implement exchange and add functionality
  320. */
/*
 * Atomically add @delta to sem->count via a cs/csg retry loop and
 * return the UPDATED count (note: the new value, not the pre-add value
 * an XADD would give).  Helper for the out-of-line slow paths.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" ar %1,%4\n"
		" cs %0,%1,%2\n"
		" jl 0b"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" agr %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
	return new;
}
  343. static inline int rwsem_is_locked(struct rw_semaphore *sem)
  344. {
  345. return (sem->count != 0);
  346. }
  347. #endif /* __KERNEL__ */
  348. #endif /* _S390_RWSEM_H */