futex.c

/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

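/*
 * With CONFIG_BASE_SMALL this yields a 1 << 4 = 16 bucket hash table,
 * otherwise 1 << 8 = 256 buckets; see futex_queues[] and hash_futex()
 * below.
 */
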
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiter, then make the second condition true.
 */
struct futex_q {
	struct plist_node list;
	/* There can only be a single waiter */
	wait_queue_head_t waiter;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;

	/* Bitset for the optional bitmasked wakeup */
	u32 bitset;
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
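/*
 * Note that the jhash2() length argument below is the number of u32
 * words in the word/ptr pair, and that the in-page offset of the futex
 * serves as the hash seed.
 */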
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 0, &page);
	if (err < 0)
		return err;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}

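/*
 * Illustrative examples of the resulting keys (not an API guarantee):
 * two processes that MAP_SHARED the same file page get the same
 * (inode, pgoff, offset) key and thus see each other's wakeups, while
 * a PROCESS_PRIVATE futex at the same address keys on
 * (mm, address, offset) and is visible only within one process.
 */
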
static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb: the hash bucket the futex_q's reside in
 * @key: the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

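/*
 * The two helpers above run with pagefaults disabled: instead of
 * sleeping on a fault they fail with -EFAULT, which makes them safe to
 * call under a hash bucket spinlock. Callers that do see a fault must
 * drop the lock, fault the page in with get_user() and retry - a
 * pattern used throughout this file.
 */
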
/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

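/*
 * The per-task cache above holds at most one preallocated pi_state, so
 * alloc_pi_state() can neither fail nor sleep; callers must have
 * invoked refill_pi_state_cache() beforehand (see futex_lock_pi()).
 */
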
/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;
	const struct cred *cred = current_cred(), *pcred;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p) {
		p = ERR_PTR(-ESRCH);
	} else {
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid)
			p = ERR_PTR(-ESRCH);
		else
			get_task_struct(p);
	}

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

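/*
 * The drop-and-retake of curr->pi_lock above exists to respect the
 * lock ordering: hb->lock must be taken before pi_lock, so we cannot
 * simply take hb->lock while already holding pi_lock.
 */
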
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));
			WARN_ON(pid && pi_state->owner &&
				pi_state->owner->pid != pid);

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
 * @uaddr: the pi futex user address
 * @hb: the pi futex hash bucket
 * @key: the futex key associated with uaddr and hb
 * @ps: the pi_state pointer where we store the result of the lookup
 * @task: the task to perform the atomic lock work for.  This will be
 * "current" except in the case of requeue pi.
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval;

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(task);

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases where we take over the futex here: the
	 * owner TID is 0 (the futex has no owner), or a previous lookup
	 * observed the OWNER_DIED bit and set 'ownerdied'. In both cases
	 * the take over is unconditional.
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to owner died take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}

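/*
 * For reference, the userspace fastpath that this slowpath complements
 * looks roughly like this (an illustrative sketch, not kernel code):
 *
 *	lock:	if (cmpxchg(uaddr, 0, gettid()) == 0)
 *			return;				(uncontended)
 *		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, timeout, 0, 0);
 *
 * The kernel therefore only sees the contended case, which is why the
 * 0 -> TID cmpxchg above "will most likely not succeed".
 */
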
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	plist_del(&q->list, &q->list.plist);
	/*
	 * The lock in wake_up() is a crucial memory barrier after the
	 * plist_del() and also before assigning to q->lock_ptr.
	 */
	wake_up(&q->waiter);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store to
	 * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
	 * end of wake_up() does not prevent this store from moving.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

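/*
 * The NULL store above pairs with the lock_ptr load and barrier() in
 * unqueue_me(): a woken waiter either still sees the old lock pointer
 * and revalidates it under the lock, or sees NULL and knows it was
 * woken without taking any lock at all.
 */
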
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy: that way we know a lock waiter is in
	 * flight. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit need not be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

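/*
 * double_lock_hb() takes the two bucket locks in address order, so two
 * tasks operating on the same pair of buckets cannot ABBA-deadlock;
 * spin_lock_nested() tells lockdep about the intended nesting.
 */
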
/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);
out:
	return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = get_user(dummy, uaddr2);
		if (ret)
			goto out_put_keys;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &key2);
		put_futex_key(fshared, &key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q: the futex_q to requeue
 * @hb1: the source hash_bucket
 * @hb2: the target hash_bucket
 * @key2: the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{
	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
		q->list.plist.lock = &hb2->lock;
#endif
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!fshared)
				goto retry_private;

			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex(&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			requeue_futex(this, hb1, hb2, &key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
		}
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/* drop_futex_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	init_waitqueue_head(&q->waiter);

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

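/*
 * Illustrative example of the resulting wake order: a SCHED_FIFO
 * waiter (normal_prio < MAX_RT_PRIO) is queued ahead of all
 * SCHED_NORMAL waiters, which all share prio == MAX_RT_PRIO and
 * therefore wake in plain FIFO order among themselves.
 */
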
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* Return 1 if we were still queued (i.e. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for
 * non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner, int fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for COW.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = get_user(uval, uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode the relevant options (shared, clockrt, timeout) in the
 * 'flags' field:
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr: user address of the futex
 * @fshared: whether the futex is shared (1) or not (0)
 * @q: futex_q (contains pi_state and access to the rt_mutex)
 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
		       int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current, fshared);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late. We can access the rt_mutex_owner without
		 * locking, as the other task is now blocked on the hash bucket
		 * lock. Fix the state up.
		 */
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner, nor the pending owner, of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb: the futex hash bucket, must be locked by the caller
 * @q: the futex_q to queue up on
 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
 * @wait: the wait_queue to add to the futex_q after queueing in the hb
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout,
				wait_queue_t *wait)
{
	queue_me(q, hb);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash. This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * Add current as the futex_q waiter.  We don't remove ourselves from
	 * the wait_queue because we are the only user of it.
	 */
	add_wait_queue(&q->waiter, wait);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr: the futex userspace address
 * @val: the expected value
 * @fshared: whether the futex is shared (1) or not (0)
 * @q: the associated futex_q
 * @hb: storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 */
retry:
	q->key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q->key);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(q, *hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(fshared, &q->key);
	return ret;
}

static int futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	DECLARE_WAITQUEUE(wait, current);
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q;
	int ret;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/* Prepare to wait on uaddr. */
	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to, &wait);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = FLAGS_HAS_TIMEOUT;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	int fshared = 0;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = 1;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
				restart->futex.bitset,
				restart->futex.flags & FLAGS_CLOCKRT);
}

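/*
 * Note that the saved expiry time is absolute (FLAGS_HAS_TIMEOUT with
 * restart->futex.time holding abs_time), so a wait restarted after a
 * signal resumes with the original deadline rather than a fresh
 * timeout.
 */
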
  1339. /*
  1340. * Userspace tried a 0 -> TID atomic transition of the futex value
  1341. * and failed. The kernel side here does the whole locking operation:
  1342. * if there are waiters then it will block, it does PI, etc. (Due to
  1343. * races the kernel might see a 0 value of the futex too.)
  1344. */
  1345. static int futex_lock_pi(u32 __user *uaddr, int fshared,
  1346. int detect, ktime_t *time, int trylock)
  1347. {
  1348. struct hrtimer_sleeper timeout, *to = NULL;
  1349. struct futex_hash_bucket *hb;
  1350. u32 uval;
  1351. struct futex_q q;
  1352. int res, ret;
  1353. if (refill_pi_state_cache())
  1354. return -ENOMEM;
  1355. if (time) {
  1356. to = &timeout;
  1357. hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
  1358. HRTIMER_MODE_ABS);
  1359. hrtimer_init_sleeper(to, current);
  1360. hrtimer_set_expires(&to->timer, *time);
  1361. }
  1362. q.pi_state = NULL;
  1363. retry:
  1364. q.key = FUTEX_KEY_INIT;
  1365. ret = get_futex_key(uaddr, fshared, &q.key);
  1366. if (unlikely(ret != 0))
  1367. goto out;
  1368. retry_private:
  1369. hb = queue_lock(&q);
  1370. ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current);
  1371. if (unlikely(ret)) {
  1372. switch (ret) {
  1373. case 1:
  1374. /* We got the lock. */
  1375. ret = 0;
  1376. goto out_unlock_put_key;
  1377. case -EFAULT:
  1378. goto uaddr_faulted;
  1379. case -EAGAIN:
  1380. /*
  1381. * Task is exiting and we just wait for the
  1382. * exit to complete.
  1383. */
  1384. queue_unlock(&q, hb);
  1385. put_futex_key(fshared, &q.key);
  1386. cond_resched();
  1387. goto retry;
  1388. default:
  1389. goto out_unlock_put_key;
  1390. }
  1391. }
  1392. /*
  1393. * Only actually queue now that the atomic ops are done:
  1394. */
  1395. queue_me(&q, hb);
  1396. WARN_ON(!q.pi_state);
  1397. /*
  1398. * Block on the PI mutex:
  1399. */
  1400. if (!trylock)
  1401. ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
  1402. else {
  1403. ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
  1404. /* Fixup the trylock return value: */
  1405. ret = ret ? 0 : -EWOULDBLOCK;
  1406. }
  1407. spin_lock(q.lock_ptr);
  1408. /*
  1409. * Fixup the pi_state owner and possibly acquire the lock if we
  1410. * haven't already.
  1411. */
  1412. res = fixup_owner(uaddr, fshared, &q, !ret);
  1413. /*
  1414. * If fixup_owner() returned an error, proprogate that. If it acquired
  1415. * the lock, clear our -ETIMEDOUT or -EINTR.
  1416. */
  1417. if (res)
  1418. ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, and we have to modify it
	 * atomically. Therefore, if we continue to fault after get_user()
	 * below, we need to handle the fault ourselves, while still holding
	 * the mmap_sem. This can occur if the uaddr is under contention as
	 * we have to drop the mmap_sem in order to call get_user().
	 */
	queue_unlock(&q, hb);

	ret = get_user(uval, uaddr);
	if (ret)
		goto out_put_key;

	if (!fshared)
		goto retry_private;

	put_futex_key(fshared, &q.key);
	goto retry;
}
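
/*
 * Illustrative userspace sketch (not part of this file): the fast path
 * that futex_lock_pi() backs up. A PI lock is taken by atomically
 * cmpxchg()ing the futex word from 0 to the caller's TID; only on
 * contention does the task enter the kernel via FUTEX_LOCK_PI. The raw
 * syscall() usage is an assumption for the example - glibc has no
 * futex(2) wrapper.
 *
 *	static void pi_lock(uint32_t *futex)
 *	{
 *		uint32_t tid = syscall(SYS_gettid);
 *
 *		// Fast path: 0 -> TID, no kernel entry.
 *		if (__sync_bool_compare_and_swap(futex, 0, tid))
 *			return;
 *		// Slow path: queue in the kernel, boost the owner, block.
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 */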

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, int fshared)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
		return -EPERM;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED))
		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);

	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == task_pid_vnr(current)))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

out:
	return ret;

pi_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, and we have to modify it
	 * atomically. Therefore, if we continue to fault after get_user()
	 * below, we need to handle the fault ourselves, while still holding
	 * the mmap_sem. This can occur if the uaddr is under contention as
	 * we have to drop the mmap_sem in order to call get_user().
	 */
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

	ret = get_user(uval, uaddr);
	if (!ret)
		goto retry;

	return ret;
}
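
/*
 * Illustrative userspace sketch (not part of this file): the unlock
 * fast path that futex_unlock_pi() completes. If the futex word still
 * holds exactly our TID (no waiter bits set), one cmpxchg back to 0
 * releases the lock without entering the kernel; otherwise
 * FUTEX_UNLOCK_PI lets the kernel hand the lock to the top waiter.
 * syscall()/SYS_gettid are assumptions, as in the locking sketch.
 *
 *	static void pi_unlock(uint32_t *futex)
 *	{
 *		uint32_t tid = syscall(SYS_gettid);
 *
 *		// Fast path: uncontended TID -> 0.
 *		if (__sync_bool_compare_and_swap(futex, tid, 0))
 *			return;
 *		// Slow path: waiters present - the kernel picks the next owner.
 *		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */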

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}
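
/*
 * Illustrative userspace sketch (not part of this file): how a
 * threading library might register its robust list. Each thread owns
 * one robust_list_head; futex_offset records where the 32-bit lock
 * word lives relative to each list entry. The structure layout and
 * names below are made up for the example (needs <stddef.h>,
 * <linux/futex.h>, <sys/syscall.h>, <unistd.h>).
 *
 *	struct my_robust_mutex {
 *		struct robust_list list;	// links into the per-thread list
 *		uint32_t futex;			// the lock word itself
 *	};
 *
 *	static __thread struct robust_list_head head;
 *
 *	// Once per thread, typically right after the thread starts:
 *	head.list.next	     = &head.list;	// empty (circular) list
 *	head.futex_offset    = offsetof(struct my_robust_mutex, futex);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */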

/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	const struct cred *cred = current_cred(), *pcred;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		rcu_read_unlock();
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, nval, mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

		if (nval == -EFAULT)
			return -1;

		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
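
/*
 * Worked example of the transition above, using the standard futex
 * word layout (TID in FUTEX_TID_MASK = 0x3fffffff, FUTEX_WAITERS =
 * 0x80000000, FUTEX_OWNER_DIED = 0x40000000). For a dead owner with
 * TID 0x456 and a waiter queued:
 *
 *	uval = 0x80000456	(FUTEX_WAITERS | TID)
 *	mval = 0xc0000000	(FUTEX_WAITERS | FUTEX_OWNER_DIED, TID cleared)
 *
 * The WAITERS bit is preserved so the woken waiter knows the lock is
 * still contended; OWNER_DIED tells it to recover the lock state.
 */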

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
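
/*
 * Example of the tagged-pointer encoding decoded above: list entries
 * are word-aligned, so userspace borrows bit 0 as the PI flag. For a
 * hypothetical entry at address 0x7f0000001000:
 *
 *	uentry = 0x7f0000001001  ->  *entry = 0x7f0000001000, *pi = 1
 *	uentry = 0x7f0000001000  ->  *entry = 0x7f0000001000, *pi = 0
 */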

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int clockrt, ret = -ENOSYS;
	int cmd = op & FUTEX_CMD_MASK;
	int fshared = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		fshared = 1;

	clockrt = op & FUTEX_CLOCK_REALTIME;
	if (clockrt && cmd != FUTEX_WAIT_BITSET)
		return -ENOSYS;

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, fshared, val, val3);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_unlock_pi(uaddr, fshared);
		break;
	case FUTEX_TRYLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
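
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * wait/wake pairing that lands in do_futex() above. There is no glibc
 * wrapper for futex(2), so a raw syscall() is used; the wrapper and
 * the one-second timeout are assumptions for the example.
 *
 *	#include <stdint.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/futex.h>
 *
 *	uint32_t futex_word = 0;
 *
 *	// Waiter: sleeps only if futex_word is still 0 (otherwise the
 *	// kernel returns -EWOULDBLOCK immediately - no lost wakeups).
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 0, &ts, NULL, 0);
 *
 *	// Waker: change the word first, then wake at most one sleeper.
 *	__sync_fetch_and_add(&futex_word, 1);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
 */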

static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementations; the non-functional ones will return
	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
	if (curval == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);