
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
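
/*
 * For context (not part of this file): callers normally initialize an
 * rwsem through the init_rwsem() macro in <linux/rwsem.h>, which supplies
 * the lockdep name/key and calls __init_rwsem(), then take and release
 * the lock with the usual wrappers.  down_read()/down_write() only drop
 * into the slow paths below (rwsem_down_read_failed() and
 * rwsem_down_write_failed()) when the fast path loses a race.  A minimal
 * usage sketch:
 *
 *	static struct rw_semaphore my_sem;
 *
 *	init_rwsem(&my_sem);
 *	down_read(&my_sem);
 *	up_read(&my_sem);
 *	down_write(&my_sem);
 *	up_write(&my_sem);
 */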

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
 * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
 * since the rwsem value was observed.
 */
#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
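
/*
 * For reference, the bias values this file manipulates are defined in the
 * arch's <asm/rwsem.h> and vary by word size; the classic 32-bit layout
 * (assumed in the worked examples below) is:
 *
 *	RWSEM_UNLOCKED_VALUE	  0x00000000
 *	RWSEM_ACTIVE_BIAS	  0x00000001
 *	RWSEM_ACTIVE_MASK	  0x0000ffff
 *	RWSEM_WAITING_BIAS	(-0x00010000)
 *	RWSEM_ACTIVE_READ_BIAS	  RWSEM_ACTIVE_BIAS
 *	RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * The low 16 bits count active lockers; the count goes negative while
 * waiters are queued or a writer is active.  A held write lock with an
 * empty queue therefore reads as RWSEM_ACTIVE_WRITE_BIAS (0xffff0001).
 */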

/*
 * Handle the lock release when there are processes blocked on it that can
 * now run.
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if the wake type is not RWSEM_WAKE_READ_OWNED
 *   (i.e. not when downgrading)
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;

        if (wake_type == RWSEM_WAKE_READ_OWNED)
                /* Another active reader was observed, so wakeup is not
                 * likely to succeed.  Save the atomic op.
                 */
                goto out;

        /* Wake up the writing waiter and let the task grab the sem: */
        wake_up_process(waiter->task);
        goto out;

 readers_only:
        /* If we come here from up_xxxx(), another thread might have reached
         * rwsem_down_failed_common() before we acquired the spinlock and
         * woken up a waiter, making it now active.  We prefer to check for
         * this first in order to not spend too much time with the spinlock
         * held if we're not going to be able to wake up readers in the end.
         *
         * Note that we do not need to update the rwsem count: any writer
         * trying to acquire rwsem will run rwsem_down_write_failed() due
         * to the waiting threads and block trying to acquire the spinlock.
         *
         * We use a dummy atomic update in order to acquire the cache line
         * exclusively since we expect to succeed and run the final rwsem
         * count adjustment pretty soon.
         */
        if (wake_type == RWSEM_WAKE_ANY &&
            rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
                /* Someone grabbed the sem for write already */
                goto out;

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue.  Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                    struct rwsem_waiter, list);

        } while (waiter->flags & RWSEM_WAITING_FOR_READ);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
        if (waiter->flags & RWSEM_WAITING_FOR_READ)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        for (loop = woken; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}
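
/*
 * Worked example for the readers_only path above, using the 32-bit bias
 * values: two readers are queued behind a write lock that has just been
 * released, so count == RWSEM_WAITING_BIAS.  The loop counts woken = 2
 * and hits the end of the list, giving
 *
 *	adjustment = 2 * RWSEM_ACTIVE_READ_BIAS - RWSEM_WAITING_BIAS;
 *
 * and rwsem_atomic_add() leaves count == 2: two active readers, no
 * waiting bias, empty wait list.
 */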

/* Try to get write sem, caller holds sem->wait_lock: */
static int try_get_writer_sem(struct rw_semaphore *sem,
                              struct rwsem_waiter *waiter)
{
        struct rwsem_waiter *fwaiter;
        long oldcount, adjustment;

        /* only steal when the first waiter is writing */
        fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE))
                return 0;

        adjustment = RWSEM_ACTIVE_WRITE_BIAS;
        /* Only one waiter in the queue: */
        if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
                adjustment -= RWSEM_WAITING_BIAS;

try_again_write:
        oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
        if (!(oldcount & RWSEM_ACTIVE_MASK)) {
                /* No active lock: */
                struct task_struct *tsk = waiter->task;

                list_del(&waiter->list);
                smp_mb();
                put_task_struct(tsk);
                tsk->state = TASK_RUNNING;
                return 1;
        }
        /* someone grabbed the sem already */
        if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
                return 0;

        goto try_again_write;
}
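
/*
 * Stealing arithmetic, again with the 32-bit biases: suppose the stealer
 * is the only queued waiter and the previous holder has just released,
 * so count == RWSEM_WAITING_BIAS.  Then
 *
 *	adjustment = RWSEM_ACTIVE_WRITE_BIAS - RWSEM_WAITING_BIAS
 *		   = RWSEM_ACTIVE_BIAS;
 *
 * oldcount comes back as RWSEM_WAITING_BIAS, whose low 16 bits are clear,
 * so the steal succeeds and count ends at RWSEM_ACTIVE_WRITE_BIAS with
 * the wait list empty - exactly what an uncontended down_write() leaves.
 */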

/*
 * wait for a lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_failed_common(struct rw_semaphore *sem,
                         unsigned int flags, signed long adjustment)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        signed long count;

        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);
        waiter.task = tsk;
        waiter.flags = flags;
        get_task_struct(tsk);

        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es) up.
         *
         * Alternatively, if we're called from a failed down_write(), there
         * were already threads queued before us and there are no active
         * writers, the lock must be read owned; so we try to wake any read
         * locks that were queued ahead of us.
         */
        if (count == RWSEM_WAITING_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
        else if (count > RWSEM_WAITING_BIAS &&
                 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;

                raw_spin_lock_irq(&sem->wait_lock);
                /* Try to get the writer sem, may steal from the head writer: */
                if (flags == RWSEM_WAITING_FOR_WRITE)
                        if (try_get_writer_sem(sem, &waiter)) {
                                raw_spin_unlock_irq(&sem->wait_lock);
                                return sem;
                        }
                raw_spin_unlock_irq(&sem->wait_lock);
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
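
/*
 * Note on the handshake above: a waiter spins on waiter.task becoming
 * NULL rather than on the count.  The waiter took a task reference via
 * get_task_struct() before queueing; __rwsem_do_wake() reads the task
 * pointer, issues smp_mb() and only then clears waiter->task.  Once the
 * field is NULL the waiter may return and its on-stack rwsem_waiter
 * vanishes, so the waker must not touch it afterwards; the held task
 * reference keeps wake_up_process()/put_task_struct() safe regardless.
 */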

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
                                        -RWSEM_ACTIVE_READ_BIAS);
}

/*
 * wait for the write lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
                                        -RWSEM_ACTIVE_WRITE_BIAS);
}
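
/*
 * For context, a sketch of how an arch fast path hands off to the slow
 * path above.  The real code lives in the arch's (or asm-generic's)
 * <asm/rwsem.h> and differs in detail; this is only an illustration
 * using the 32-bit biases:
 *
 *	static inline void __down_write(struct rw_semaphore *sem)
 *	{
 *		if (rwsem_atomic_update(RWSEM_ACTIVE_WRITE_BIAS, sem) !=
 *		    RWSEM_ACTIVE_WRITE_BIAS)
 *			rwsem_down_write_failed(sem);
 *	}
 *
 * i.e. the write fast path optimistically adds its bias and calls
 * rwsem_down_write_failed() only if the count shows prior activity.
 */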

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);