#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 *  include/asm-s390/rwsem.h
 *
 *  S390 version
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of
 * an uncontended lock. This can be determined because XADD returns the old
 * value. Readers increment by 1 and see a positive value when uncontended,
 * negative if there are writers (and possibly readers) waiting (in which
 * case the reader goes to sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the sign bit.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants
 * a lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and
 * only that will be woken up; if there's a bunch of consecutive readers at
 * the front, then they'll all be woken up, but no other readers will be.
 */
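
/*
 * For illustration, a few 31-bit count values under this encoding (the
 * 64-bit variant is analogous, with 32-bit halves):
 *
 *	0x00000000	unlocked, nobody waiting
 *	0x00000001	one active reader
 *	0x00000003	three active readers
 *	0xffff0001	one active writer (MSW = -1, LSW = 1)
 *	0xfffe0001	one active writer plus one locker queued in the
 *			slow path (MSW = -2, LSW = 1)
 */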

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
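
/*
 * RWSEM_ACTIVE_WRITE_BIAS combines one waiting bias with one active bias,
 * so an uncontended write acquisition takes the count from 0 to
 * 0xffff0001 (31 bit) or 0xffffffff00000001 (64 bit); this is the value
 * the introductory comment above refers to.
 */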

/*
 * initialisation
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)				\
do {						\
	static struct lock_class_key __key;	\
						\
	__init_rwsem((sem), #sem, &__key);	\
} while (0)
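
/*
 * Usage sketch (the names below are only examples): a semaphore is either
 * declared statically or initialized at run time, and is then taken and
 * released through the generic wrappers in linux/rwsem.h, which invoke
 * the inline fast paths defined below:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	(fast path: __down_read())
 *	... read-side critical section ...
 *	up_read(&example_sem);		(fast path: __up_read())
 *
 * For a semaphore embedded in a dynamically allocated object, call
 * init_rwsem() on it before first use.
 */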

/*
 * lock for reading
 */
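
/*
 * This and the other fast paths below all follow the same pattern: load
 * the count, compute count plus the relevant bias in a second register,
 * and try to store the result with COMPARE AND SWAP (cs/csg).  If another
 * CPU changed the count in the meantime, CS sets condition code 1 and
 * reloads the old-value register with the current count, and "jl 0b"
 * retries.  The pre-update value (old) and/or the result (new) are then
 * used to decide whether one of the out-of-line slow-path helpers
 * declared above has to be called.
 */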
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	ahi	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	aghi	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
	if (old < 0)
		rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	ltr	%1,%0\n"
		"	jm	1f\n"
		"	ahi	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b\n"
		"1:"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	ltgr	%1,%0\n"
		"	jm	1f\n"
		"	aghi	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b\n"
		"1:"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
	return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	a	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (old != 0)
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%2)\n"
		"0:	ltr	%0,%0\n"
		"	jnz	1f\n"
		"	cs	%0,%4,0(%2)\n"
		"	jl	0b\n"
#else /* __s390x__ */
		"	lg	%0,0(%2)\n"
		"0:	ltgr	%0,%0\n"
		"	jnz	1f\n"
		"	csg	%0,%4,0(%2)\n"
		"	jl	0b\n"
#endif /* __s390x__ */
		"1:"
		: "=&d" (old), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	ahi	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	aghi	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
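	/*
	 * A negative result means waiters are recorded in the upper half of
	 * the count; if in addition no active lockers remain (the
	 * ACTIVE_MASK part is zero), this was the last reader out and the
	 * lockers at the front of the wait queue are woken.
	 */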
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	a	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
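
/*
 * Adding -RWSEM_WAITING_BIAS converts the writer's ACTIVE_WRITE_BIAS (one
 * waiting bias plus one active bias) into a plain reader bias: on 31 bit
 * an uncontended 0xffff0001 becomes 0x00000001, while any lockers queued
 * behind the writer remain reflected in the most significant half.
 */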
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	a	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	agr	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory");
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%5\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
#else /* __s390x__ */
		"	lg	%0,0(%3)\n"
		"0:	lgr	%1,%0\n"
		"	agr	%1,%5\n"
		"	csg	%0,%1,0(%3)\n"
		"	jl	0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory");
	return new;
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */