semaphore.c

/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

/*
 * Some notes on the implementation:
 *
 * down_trylock() and up() can be called from interrupt context.
 * So we have to disable interrupts when taking the lock.
 *
 * The ->count variable, if positive, defines how many more tasks can
 * acquire the semaphore.  If negative, it represents how many tasks are
 * waiting on the semaphore (*).  If zero, no tasks are waiting, and no more
 * tasks can acquire the semaphore.
 *
 * (*) Except for the window between one task calling up() and the task
 * sleeping in a __down_common() waking up.  In order to avoid a third task
 * coming in and stealing the second task's wakeup, we leave the ->count
 * negative.  If we have a more complex situation, the ->count may become
 * zero or negative (eg a semaphore with count = 2, three tasks attempt to
 * acquire it, one sleeps, two finish and call up(), the second task to call
 * up() notices that the list is empty and just increments count).
 */

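/*
 * A worked trace of the count = 2 example above (tasks A, B, C; the
 * interleaving shown is only one possibility and is illustrative, not
 * part of the algorithm's guarantees):
 *
 *      A: down()   count 2 -> 1
 *      B: down()   count 1 -> 0
 *      C: down()   count 0 -> -1, C sleeps in __down_common()
 *      A: up()     count < 0, wait_list non-empty: wake C, count stays -1
 *      B: up()     count < 0, wait_list now empty: count -1 -> 0
 *      C: wakes    ++count in __down_common(): count 0 -> 1, C now holds it
 */
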
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Sleeps until the semaphore has been acquired.
 */
void down(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (unlikely(sem->count-- <= 0))
                __down(sem);
        spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Like down(), but the sleep may be interrupted by a signal, in which
 * case -EINTR is returned.  Returns 0 if the semaphore was acquired.
 */
int down_interruptible(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (unlikely(sem->count-- <= 0))
                result = __down_interruptible(sem);
        spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_interruptible);

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Like down_interruptible(), but only a fatal signal interrupts the
 * sleep.  Returns -EINTR in that case, 0 on success.
 */
int down_killable(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (unlikely(sem->count-- <= 0))
                result = __down_killable(sem);
        spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully and 1 if it is contended.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int count;

        spin_lock_irqsave(&sem->lock, flags);
        count = sem->count - 1;
        if (likely(count >= 0))
                sem->count = count;
        spin_unlock_irqrestore(&sem->lock, flags);

        return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

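/*
 * A minimal usage sketch (hypothetical driver code, the names are made
 * up): because a non-zero return value means the semaphore could NOT be
 * acquired, the test reads the opposite way round from spin_trylock():
 *
 *      static irqreturn_t foo_isr(int irq, void *dev_id)
 *      {
 *              struct foo_dev *foo = dev_id;
 *
 *              if (down_trylock(&foo->sem))
 *                      return IRQ_NONE;
 *              foo_handle_event(foo);
 *              up(&foo->sem);
 *              return IRQ_HANDLED;
 *      }
 */
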
/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @jiffies: how long to wait before failing
 *
 * Returns 0 if the semaphore was acquired, or -ETIME if @jiffies
 * elapsed first.
 */
int down_timeout(struct semaphore *sem, long jiffies)
{
        unsigned long flags;
        int result = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (unlikely(sem->count-- <= 0))
                result = __down_timeout(sem, jiffies);
        spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_timeout);

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count >= 0))
                sem->count++;
        else
                __up(sem);
        spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

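/*
 * A minimal usage sketch (hypothetical names): a semaphore used as a
 * completion signal, released from interrupt context and waited on from
 * process context.
 *
 *      static struct semaphore foo_ready;
 *
 *      In the driver's init path:
 *              sema_init(&foo_ready, 0);
 *
 *      In the interrupt handler:
 *              up(&foo_ready);
 *
 *      In the waiting thread:
 *              if (down_interruptible(&foo_ready))
 *                      return -EINTR;
 *              foo_process_data();
 */
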
/* Functions for the contended case */

struct semaphore_waiter {
        struct list_head list;
        struct task_struct *task;
        int up;                 /* set when this waiter is woken and may proceed */
};

/*
 * Wake up a process waiting on a semaphore.  We need to call this from both
 * __up and __down_common as it's possible to race a task into the semaphore
 * if it comes in at just the right time between two tasks calling up() and
 * a third task waking up.  This function assumes the wait_list is already
 * checked for being non-empty.
 */
static noinline void __sched __up_down_common(struct semaphore *sem)
{
        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                                        struct semaphore_waiter, list);

        list_del(&waiter->list);
        waiter->up = 1;
        wake_up_process(waiter->task);
}

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
                                                                long timeout)
{
        int result = 0;
        struct task_struct *task = current;
        struct semaphore_waiter waiter;

        list_add_tail(&waiter.list, &sem->wait_list);
        waiter.task = task;
        waiter.up = 0;

        for (;;) {
                if (state == TASK_INTERRUPTIBLE && signal_pending(task))
                        goto interrupted;
                if (state == TASK_KILLABLE && fatal_signal_pending(task))
                        goto interrupted;
                if (timeout <= 0)
                        goto timed_out;
                __set_task_state(task, state);
                spin_unlock_irq(&sem->lock);
                timeout = schedule_timeout(timeout);
                spin_lock_irq(&sem->lock);
                if (waiter.up)
                        goto woken;
        }

 timed_out:
        list_del(&waiter.list);
        result = -ETIME;
        goto woken;

 interrupted:
        list_del(&waiter.list);
        result = -EINTR;

 woken:
        /*
         * Account for the process which woke us up.  For the case where
         * we're interrupted, we need to increment the count on our own
         * behalf.  I don't believe we can hit the case where the
         * sem->count hits zero, *and* there's a second task sleeping,
         * but it doesn't hurt, that's not a commonly exercised path and
         * it's not a performance path either.
         */
        if (unlikely((++sem->count >= 0) && !list_empty(&sem->wait_list)))
                __up_down_common(sem);
        return result;
}

static noinline void __sched __down(struct semaphore *sem)
{
        __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
        return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
        return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
        return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

static noinline void __sched __up(struct semaphore *sem)
{
        if (unlikely(list_empty(&sem->wait_list)))
                sem->count++;
        else
                __up_down_common(sem);
}