/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
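
/*
 * Illustrative note (not part of the original file): how the owner word
 * decodes.  rt_mutex_owner() in rtmutex_common.h simply masks off the
 * waiters bit, roughly
 *
 *	(struct task_struct *)((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS)
 *
 * so "taskpointer | RT_MUTEX_HAS_WAITERS" still names the owning task,
 * while the raw word records that the unlock fast path must not be used.
 */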

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
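
/*
 * Worked example (illustrative, not from the original source): a task with
 * normal_prio 120 that owns a lock on which a SCHED_FIFO task of prio 98 is
 * the top pi waiter gets min(98, 120) == 98, i.e. it runs at the waiter's
 * priority until the boost is undone.  Lower numeric values mean higher
 * priority here.
 */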

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before !
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
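
/*
 * Illustrative scenario (not in the original source) for the walk above:
 * task A (prio 90) blocks on L1 owned by B (prio 110); task_blocks_on_rt_mutex()
 * boosts B to 90 and, since B is itself blocked on L2 owned by C (prio 120),
 * starts this walk with task == B.  The first iteration requeues B's waiter
 * on L2 at the new priority and boosts C to 90; the next iteration stops
 * because C is not blocked on anything.  With deadlock detection enabled,
 * walking back onto orig_lock would instead return -EDEADLK.
 */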

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * It will get the lock because of one of these conditions:
	 * 1) there is no waiter
	 * 2) higher priority than waiters
	 * 3) it is top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
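
/*
 * Worked example (illustrative): with the top queued waiter at prio 95, a
 * task at prio 92 calling try_to_take_rt_mutex() with waiter == NULL
 * succeeds (92 < 95; lower value means higher priority), a task at prio 100
 * is refused, and the prio-95 top waiter itself succeeds when passed in via
 * @waiter.
 */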

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
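
/*
 * Illustrative note (not in the original source): on a cmpxchg-capable,
 * non-debug build the lock fast path above therefore reduces to a single
 * cmpxchg(&lock->owner, NULL, current) and the unlock fast path to
 * cmpxchg(&lock->owner, current, NULL); any contended or waiter-marked case
 * falls back to the slow functions, which serialize on lock->wait_lock.
 */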

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
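
/*
 * Illustrative usage sketch (not part of the original file; the hrtimer
 * helper names are assumptions that may differ between kernel versions).
 * The sleeper carries an absolute expiry; rt_mutex_slowlock() starts the
 * timer itself in HRTIMER_MODE_ABS:
 *
 *	struct hrtimer_sleeper to;
 *	int ret;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, ktime_add_ns(ktime_get(), NSEC_PER_SEC));
 *	ret = rt_mutex_timed_lock(&lock, &to, 0);
 *	destroy_hrtimer_on_stack(&to.timer);
 *
 * ret is 0 on success, -EINTR on a signal or -ETIMEDOUT on expiry.
 */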

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
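
/*
 * Minimal usage sketch (illustrative only, not part of the original file).
 * DEFINE_RT_MUTEX() comes from <linux/rtmutex.h>; example_critical_section()
 * is a hypothetical caller and the block is compiled out.
 */
#if 0
static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
	rt_mutex_lock(&example_lock);	/* may boost the current owner via PI */
	/* ... touch state shared with RT tasks ... */
	rt_mutex_unlock(&example_lock);	/* drops any boost we received from waiters */
}
#endif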

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
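
/*
 * Illustrative note (not in the original source): dynamically allocated
 * locks normally go through the rt_mutex_init() wrapper in <linux/rtmutex.h>,
 * which supplies the debug name, e.g.
 *
 *	struct rt_mutex *m = kmalloc(sizeof(*m), GFP_KERNEL);
 *
 *	if (m)
 *		rt_mutex_init(m);
 *
 * Statically allocated locks can use DEFINE_RT_MUTEX() instead.
 */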

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
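
/*
 * Illustrative note (not in the original source) on the requeue-PI flow that
 * the proxy helpers above serve: the task handling FUTEX_CMP_REQUEUE_PI calls
 * rt_mutex_start_proxy_lock() on behalf of each requeued waiter, and the
 * waiter, once woken, calls rt_mutex_finish_proxy_lock() to either take the
 * lock or clean up its rt_mutex_waiter after -EINTR or -ETIMEDOUT.
 */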