rwsem-spinlock.c

/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
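
/*
 * one of these is queued on sem->wait_list for each task sleeping on the
 * semaphore; the waking code clears ->task to tell the sleeper it now holds
 * the lock (see the wait loops in __down_read() and __down_write() below)
 */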
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	if (sem->debug)
		printk("[%d] %s({%d,%d})\n",
		       current->pid, str, sem->activity,
		       list_empty(&sem->wait_list) ? 0 : 1);
}
#endif
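
/*
 * note: when RWSEM_DEBUG is 0 the rwsemtrace() calls below are expected to
 * be defined away to nothing by the rwsem headers, so they cost nothing in
 * normal builds
 */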

/*
 * initialise the semaphore
 */
void fastcall init_rwsem(struct rw_semaphore *sem)
{
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 *   - woken process blocks are discarded from the list after having task zeroed
 *   - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void fastcall __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_read");

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_read");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	rwsemtrace(sem, "Entering __down_read_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_read_trylock");
	return ret;
}

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_write");
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	rwsemtrace(sem, "Entering __down_write_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_write_trylock");
	return ret;
}

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_read");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_read");
}

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_write");
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __downgrade_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __downgrade_write");
}

EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif
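
For orientation, a minimal usage sketch follows. Callers do not normally invoke the __down_read()/__up_write() entry points above directly; they go through the generic wrappers declared in <linux/rwsem.h> (down_read(), up_read(), down_write(), up_write(), downgrade_write()), which for this spinlock-based implementation end up in the functions defined in this file. The names example_sem, example_count, example_reader() and example_writer() are hypothetical and exist only for illustration.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* hypothetical statically initialised rwsem */
static int example_count;		/* hypothetical data guarded by example_sem */

static int example_reader(void)
{
	int val;

	down_read(&example_sem);	/* shared hold: many readers may be inside at once */
	val = example_count;
	up_read(&example_sem);
	return val;
}

static void example_writer(void)
{
	down_write(&example_sem);	/* exclusive hold: sleeps on contention */
	example_count++;
	downgrade_write(&example_sem);	/* keep a read hold and wake queued readers */
	/* ... further read-only work on example_count ... */
	up_read(&example_sem);
}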