mutex.c

/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);

        debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
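/*
 * Usage sketch (added for illustration, not part of the original file):
 * a mutex is normally defined statically with DEFINE_MUTEX(), or
 * embedded in a structure and initialized with the mutex_init() macro,
 * which supplies the lock name and lock_class_key to __mutex_init().
 * The structure and function below are hypothetical.
 */
#if 0   /* example only */
static DEFINE_MUTEX(example_lock);              /* static definition */

struct example_dev {
        struct mutex    lock;
        int             value;
};

static void example_dev_setup(struct example_dev *dev)
{
        mutex_init(&dev->lock);                 /* dynamic initialization */
        dev->value = 0;
}
#endif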
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);
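/*
 * Illustration (added; not part of the original file): on most
 * architectures the fastpath is a single atomic operation.  The
 * generic version in asm-generic/mutex-dec.h is roughly the sketch
 * below: atomically decrement the count and fall into the slowpath
 * only if the mutex was not in the unlocked (1) state:
 */
#if 0   /* example only */
static inline void
example_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                fail_fn(count);
}
#endif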
/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}
EXPORT_SYMBOL(mutex_lock);
#endif

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
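/*
 * Usage sketch (added for illustration, not part of the original file):
 * a typical critical section.  The lock and counter below are
 * hypothetical.
 */
#if 0   /* example only */
static DEFINE_MUTEX(counter_lock);
static int counter;

static int counter_inc(void)
{
        int ret;

        mutex_lock(&counter_lock);      /* sleeps if contended */
        ret = ++counter;
        mutex_unlock(&counter_lock);    /* must be done by the same task */

        return ret;
}
#endif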
/*
 * Lock a mutex (possibly interruptible), slowpath.
 *
 * (On the ->count encoding: 1 means unlocked, 0 means locked with no
 * waiters, and a negative value means locked with possible waiters -
 * which is why the code below xchg()s the count to -1 while waiting.)
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        mutex_acquire(&lock->dep_map, subclass, 0, ip);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        old_val = atomic_xchg(&lock->count, -1);
        if (old_val == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                old_val = atomic_xchg(&lock->count, -1);
                if (old_val == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                                        signal_pending(task))) {
                        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
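/*
 * Usage sketch (added for illustration, not part of the original file):
 * mutex_lock_nested() tells lockdep that taking two locks of the same
 * class is intentional.  SINGLE_DEPTH_NESTING comes from
 * <linux/lockdep.h>; the tree structure below is hypothetical.
 */
#if 0   /* example only */
struct example_node {
        struct mutex            lock;
        struct example_node     *parent;
};

static void example_lock_parent_child(struct example_node *parent,
                                      struct example_node *child)
{
        mutex_lock(&parent->lock);
        /* same lock class - annotate the nesting level for lockdep: */
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

        /* ... operate on both nodes ... */

        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
}
#endif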
/*
 * Release the lock, slowpath:
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        debug_mutex_clear_owner(lock);

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
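/*
 * Usage sketch (added for illustration, not part of the original file):
 * the -EINTR return is typically translated to -ERESTARTSYS or passed
 * back toward user space.  The lock and handler below are hypothetical.
 */
#if 0   /* example only */
static DEFINE_MUTEX(example_ioctl_lock);

static int example_ioctl_handler(void)
{
        if (mutex_lock_interruptible(&example_ioctl_lock))
                return -ERESTARTSYS;    /* a signal interrupted the wait */

        /* ... critical section ... */

        mutex_unlock(&example_ioctl_lock);
        return 0;
}
#endif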
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                debug_mutex_set_owner(lock, current_thread_info());
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall __sched mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count,
                                        __mutex_trylock_slowpath);
}
EXPORT_SYMBOL(mutex_trylock);
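/*
 * Usage sketch (added for illustration, not part of the original file):
 * note the spin_trylock()-style convention - 1 means "got the lock".
 * The lock below is hypothetical.
 */
#if 0   /* example only */
static DEFINE_MUTEX(example_cache_lock);

static void example_try_flush(void)
{
        if (!mutex_trylock(&example_cache_lock))
                return;                 /* contended - try again later */

        /* ... work that is safe to skip under contention ... */

        mutex_unlock(&example_cache_lock);
}
#endif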