rwsem.c

/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
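
/* For orientation: the RWSEM_* bias constants used below are defined
 * per-architecture in <asm/rwsem.h>, not in this file.  A typical 32-bit
 * layout, which the worked examples in the comments below assume, is:
 *
 *      RWSEM_UNLOCKED_VALUE    0x00000000
 *      RWSEM_ACTIVE_BIAS       0x00000001      - one active reader or writer
 *      RWSEM_ACTIVE_MASK       0x0000ffff      - the 'active part' of count
 *      RWSEM_WAITING_BIAS      (-0x00010000)   - one queued waiter
 *      RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * i.e. the low half of count holds the number of active holders, and the
 * count as a whole goes negative whenever anyone is queued.
 */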

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long oldcount, woken, loop;

        if (downgrading)
                goto dont_wake_writers;

        /* if we came through an up_xxxx() call, we only wake someone up
         * if we can transition the active part of the count from 0 -> 1
         */
 try_again:
        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
                                                - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo;
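
        /* Worked example (assuming the 32-bit layout sketched above): after
         * the final up_read() with one writer queued, count is 0xffff0000.
         * The speculative +RWSEM_ACTIVE_BIAS makes it 0xffff0001 and oldcount
         * 0xffff0000, whose active part is zero, so this CPU owns the wake-up
         * and the granted '+1' becomes the new holder's active bias.  If some
         * other task had re-taken the lock in the meantime, the active part
         * would be non-zero and we would back out via 'undo' below.
         */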

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        /* try to grant a single write lock if there's a writer at the front
         * of the queue - note we leave the 'active part' of the count
         * incremented by 1 and the waiting part incremented by 0x00010000
         */
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;

        /* We must be careful not to touch 'waiter' after we set ->task = NULL.
         * It is allocated on the waiter's stack and may become invalid at
         * any time after that point (due to a wakeup from another source).
         */
        list_del(&waiter->list);
        tsk = waiter->task;
        smp_mb();       /* order list_del and the read of ->task before the NULL store */
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        goto out;

        /* don't want to wake any writers */
 dont_wake_writers:
        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                goto out;

        /* grant an infinite number of read locks to the readers at the front
         * of the queue
         * - note we increment the 'active part' of the count by the number of
         *   readers before waking any processes up
         */
 readers_only:
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                        struct rwsem_waiter, list);

        } while (waiter->flags & RWSEM_WAITING_FOR_READ);

        loop = woken;
        woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
        if (!downgrading)
                /* we'd already done one increment earlier */
                woken -= RWSEM_ACTIVE_BIAS;
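
        /* Worked arithmetic (again assuming the 32-bit layout above): waking
         * N readers must add N to the active part and cancel N waiting
         * biases, i.e. add N * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS)
         *                = N * (0x00000001 + 0x00010000).
         * On the up_xxxx() path one RWSEM_ACTIVE_BIAS was already added at
         * 'try_again', hence the subtraction; on the downgrade path no
         * speculative increment was made, so nothing needs to be given back.
         */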
        rwsem_atomic_add(woken, sem);

        next = sem->wait_list.next;
        for (; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;

        /* undo the change to count, but check for a transition 1->0 */
 undo:
        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
                goto out;
        goto try_again;
}

/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
                        struct rwsem_waiter *waiter, signed long adjustment)
{
        struct task_struct *tsk = current;
        signed long count;

        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        spin_lock_irq(&sem->wait_lock);
        waiter->task = tsk;
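
        /* take a reference on the task: it is dropped by whoever wakes us
         * (put_task_struct() in __rwsem_do_wake()), so wake_up_process()
         * cannot run into a freed task_struct even if we leave the slow path
         * the instant ->task is cleared
         */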
        get_task_struct(tsk);

        list_add_tail(&waiter->list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively read-locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* if there are no active locks, wake the front queued process(es) up */
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter->task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
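
/* The 'adjustment' passed in by the two callers below (values per the 32-bit
 * layout sketched above):
 * - a failed down_read() already added RWSEM_ACTIVE_BIAS in the fast path, so
 *   it passes RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS to swap that for a
 *   waiting bias
 * - a failed down_write() already added RWSEM_ACTIVE_WRITE_BIAS (waiting +
 *   active), so it passes -RWSEM_ACTIVE_BIAS and keeps only the waiting bias
 */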

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.flags = RWSEM_WAITING_FOR_READ;
        rwsem_down_failed_common(sem, &waiter,
                                RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
        return sem;
}

/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
        return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
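
/* How the slow paths above are reached (rough sketch of the per-arch fast
 * paths in <asm/rwsem.h>; details vary by architecture):
 *
 *      down_read():    atomically add RWSEM_ACTIVE_BIAS; if the result went
 *                      negative, call rwsem_down_read_failed()
 *      down_write():   atomically add RWSEM_ACTIVE_WRITE_BIAS; if the old
 *                      value was not zero, call rwsem_down_write_failed()
 *      up_read():      atomically subtract RWSEM_ACTIVE_BIAS; if the result
 *                      is negative with a zero active part, call rwsem_wake()
 *      up_write():     atomically subtract RWSEM_ACTIVE_WRITE_BIAS; if the
 *                      result is negative, call rwsem_wake()
 *      downgrade_write(): atomically add -RWSEM_WAITING_BIAS; if the result
 *                      is still negative, call rwsem_downgrade_wake()
 */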