/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg on the lock,
 * we need to set bit0 before looking at the lock, and the owner may be
 * NULL in this small window, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
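
/*
 * Illustrative sketch (not part of the original file): the owner
 * pointer and the waiters bit are packed into a single word, so
 * decoding is a plain mask, along the lines of:
 *
 *	owner task:	(struct task_struct *)
 *			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS)
 *	has waiters:	(unsigned long)lock->owner & RT_MUTEX_HAS_WAITERS
 *
 * This is what the helpers below and the accessors in
 * rtmutex_common.h rely on.
 */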
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
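
/*
 * Illustrative sketch (not part of the original file): with the
 * cmpxchg variant above, an uncontended acquire or release is a
 * single atomic compare-and-exchange of lock->owner, e.g.:
 *
 *	acquire:	rt_mutex_cmpxchg(lock, NULL, current)
 *	release:	rt_mutex_cmpxchg(lock, current, NULL)
 *
 * Either fails as soon as bit0 (RT_MUTEX_HAS_WAITERS) is set, which
 * forces the caller into the slow path under lock->wait_lock.
 */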
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
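
/*
 * Worked example (illustrative, assuming the usual kernel prio scale
 * where lower values mean higher priority): a SCHED_NORMAL task with
 * normal_prio 120 whose top pi waiter has prio 50 gets an effective
 * prio of min(50, 120) = 50, i.e. it is boosted to the waiter's level.
 */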
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task_struct() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that
	 * top_waiter can be NULL when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off, check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
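
/*
 * Illustrative walk-through (not part of the original file): assume
 * T1 (prio 120) holds L1 and is itself blocked on L2, which T3
 * (prio 120) holds. When T2 (prio 50) blocks on L1, T1 is boosted
 * to 50; because T1->pi_blocked_on is set, the chain walk continues
 * to L2 and boosts T3 to 50 as well. The walk stops when it reaches
 * a lock whose owner is not blocked, or after max_lock_depth steps.
 */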
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
		struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * anymore. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The task will get the lock if one of these conditions holds:
	 * 1) there is no waiter
	 * 2) it has higher priority than all waiters
	 * 3) it is the top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}

		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	the rt_mutex to take
 * @state:	the state the task should block in (TASK_INTERRUPTIBLE
 *		or TASK_UNINTERRUPTIBLE)
 * @timeout:	the pre-initialized and started timer, or NULL for none
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;
	int was_disabled;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		was_disabled = irqs_disabled();
		if (was_disabled)
			local_irq_enable();

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		if (was_disabled)
			local_irq_disable();

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
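
/*
 * Usage sketch (illustrative, not part of the original file), assuming
 * a lock defined with the DEFINE_RT_MUTEX() initializer from
 * <linux/rtmutex.h>:
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, runs with PI boosting available ...
 *	rt_mutex_unlock(&my_lock);
 *
 * rt_mutex_lock() may sleep, so it must not be called from atomic
 * context (note the might_sleep() annotation above).
 */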
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
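
/*
 * Illustrative caller pattern (not part of the original file): the
 * interruptible variant must be checked for a nonzero return so that
 * a signal does not leave the caller assuming it holds the lock:
 *
 *	ret = rt_mutex_lock_interruptible(&my_lock, 0);
 *	if (ret)
 *		return ret;	(e.g. -EINTR: interrupted, lock not held)
 *	...
 *	rt_mutex_unlock(&my_lock);
 */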
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible; the timeout
 *			structure is provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
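
/*
 * Illustrative caller pattern (not part of the original file): unlike
 * rt_mutex_lock(), the trylock never sleeps, so the return value must
 * be checked before touching the protected data:
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		... got the lock ...
 *		rt_mutex_unlock(&my_lock);
 *	} else {
 *		... contended: fall back or retry ...
 *	}
 */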
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
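
/*
 * Illustrative sketch (not part of the original file): callers normally
 * use the rt_mutex_init() wrapper from <linux/rtmutex.h>, which passes
 * the lock's name for the debug case, rather than calling this
 * function directly:
 *
 *	struct rt_mutex dyn_lock;
 *
 *	rt_mutex_init(&dyn_lock);	(expands to __rt_mutex_init())
 *	rt_mutex_lock(&dyn_lock);
 *	...
 *	rt_mutex_unlock(&dyn_lock);
 */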
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}
/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none. The hrtimer should already
 *			have been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}