/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
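
/*
 * That is, 16 hash buckets (1 << 4) on CONFIG_BASE_SMALL systems and
 * 256 buckets (1 << 8) otherwise; see futex_queues[] below.
 */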

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
	struct plist_node list;
	wait_queue_head_t waiters;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;

	/* Bitset for the optional bitmasked wakeup */
	u32 bitset;
};

/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * Take mm->mmap_sem, when the futex is shared
 */
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
	if (fshared)
		down_read(fshared);
}

/*
 * Release mm->mmap_sem, when the futex is shared
 */
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
	if (fshared)
		up_read(fshared);
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: NULL for a PROCESS_PRIVATE futex,
 *	&current->mm->mmap_sem for a PROCESS_SHARED futex
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * fshared is NULL for PROCESS_PRIVATE futexes.
 * For other futexes, it points to &current->mm->mmap_sem and the
 * caller must have taken the reader lock, but NOT any spinlocks.
 */
static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
			 union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}
	/*
	 * The futex is hashed differently depending on whether
	 * it's in a shared or private mapping.  So check vma first.
	 */
	vma = find_extend_vma(mm, address);
	if (unlikely(!vma))
		return -EFAULT;

	/*
	 * Permissions.
	 */
	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.  Therefore we use
	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
	 * mappings of _writable_ handles.
	 */
	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
		key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}

	/*
	 * Linear file mappings are also simple.
	 */
	key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
	key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
				     + vma->vm_pgoff);
		return 0;
	}

	/*
	 * We could walk the page table to read the non-linear
	 * pte, and get the page index without fetching the page
	 * from swap.  But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
	 */
	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);
		return 0;
	}
	return err;
}
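
/*
 * Summary of the key flavours produced above (illustrative):
 *
 *	private:	  { mm, virtual address },  no offset bits, no refcount
 *	shared anonymous: { mm, virtual address } + FUT_OFF_MMSHARED (pins mm_count)
 *	shared file:	  { inode, page offset }  + FUT_OFF_INODE    (pins the inode)
 *
 * Two tasks mapping the same file page therefore compute the same key,
 * and hash to the same bucket, even at different virtual addresses.
 */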

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (key->both.ptr == NULL)
		return;
	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;
	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * Fault handling.
 * If fshared is non-NULL, current->mm->mmap_sem is already held.
 */
static int futex_handle_fault(unsigned long address,
			      struct rw_semaphore *fshared, int attempt)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	int ret = -EFAULT;

	if (attempt > 2)
		return ret;

	if (!fshared)
		down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && address >= vma->vm_start &&
	    (vma->vm_flags & VM_WRITE)) {
		int fault;
		fault = handle_mm_fault(mm, vma, address, 1);
		if (unlikely((fault & VM_FAULT_ERROR))) {
#if 0
			/* XXX: let's do this when we verify it is OK */
			if (fault & VM_FAULT_OOM)
				ret = -ENOMEM;
#endif
		} else {
			ret = 0;
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
		}
	}
	if (!fshared)
		up_read(&mm->mmap_sem);
	return ret;
}

/*
 * PI code:
 */
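
/*
 * Reminder (values defined in <linux/futex.h>): the userspace futex
 * word of a PI futex encodes
 *
 *	bits  0-29	TID of the owner	(FUTEX_TID_MASK)
 *	bit  30		FUTEX_OWNER_DIED
 *	bit  31		FUTEX_WAITERS
 *
 * e.g. a value of (FUTEX_WAITERS | tid) means "owned by tid, and the
 * owner must go into the kernel to unlock".
 */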
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
		p = ERR_PTR(-ESRCH);
	else
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;

			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));
			WARN_ON(pid && pi_state->owner &&
				pi_state->owner->pid != pid);

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting.  To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out.  When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	plist_del(&q->list, &q->list.plist);
	/*
	 * The lock in wake_up_all() is a crucial memory barrier after the
	 * plist_del() and also before assigning to q->lock_ptr.
	 */
	wake_up_all(&q->waiters);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store
	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
	 * at the end of wake_up_all() does not prevent this store from
	 * moving.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy - we know that a lock waiter is on its
	 * way.  We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around.  We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex.  The owner-died
	 * bit need not be preserved here - we are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
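
/*
 * Taking the two bucket locks in a fixed (address) order is what makes
 * the two-bucket operations below deadlock-free: if task A needs
 * buckets {X, Y} while task B needs {Y, X}, both lock min(X, Y) first,
 * so an ABBA deadlock cannot form.  The hb1 == hb2 case takes the lock
 * only once.
 */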

/*
 * Wake up waiters matching bitset queued on this futex (uaddr),
 * up to nr_wake of them:
 */
static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
		      int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key;
	int ret;

	if (!bitset)
		return -EINVAL;

	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
out:
	futex_unlock_mm(fshared);
	return ret;
}

/*
 * Perform an atomic operation (encoded in 'op') on the futex word at
 * uaddr2, wake up to nr_wake waiters on uaddr1 and, if the old value
 * of *uaddr2 satisfies the comparison encoded in 'op', up to nr_wake2
 * waiters on uaddr2:
 */
static int
futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
	      u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret, attempt = 0;

retryfull:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry:
	double_lock_hb(hb1, hb2);

	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		spin_unlock(&hb1->lock);
		if (hb1 != hb2)
			spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out;
		}

		/*
		 * futex_atomic_op_inuser needs to both read and write
		 * *(int __user *)uaddr2, but we can't modify it
		 * non-atomically.  Therefore, if get_user below is not
		 * enough, we need to handle the fault ourselves, while
		 * still holding the mmap_sem.
		 */
		if (attempt++) {
			ret = futex_handle_fault((unsigned long)uaddr2,
						 fshared, attempt);
			if (ret)
				goto out;
			goto retry;
		}

		/*
		 * If we would have faulted, release mmap_sem,
		 * fault it in and start all over again.
		 */
		futex_unlock_mm(fshared);

		ret = get_user(dummy, uaddr2);
		if (ret)
			return ret;

		goto retryfull;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
out:
	futex_unlock_mm(fshared);

	return ret;
}
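
/*
 * For reference, the 'op' argument above packs four fields, built in
 * userspace with the FUTEX_OP(op, oparg, cmp, cmparg) macro from
 * <linux/futex.h>: 'op' selects the atomic update applied to *uaddr2
 * (SET, ADD, OR, ANDN or XOR, with oparg as the operand) and cmp/cmparg
 * encode the comparison (EQ, NE, LT, LE, GT, GE) applied to the *old*
 * value of *uaddr2 that decides whether the second wakeup is performed.
 */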

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
			 u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			spin_unlock(&hb1->lock);
			if (hb1 != hb2)
				spin_unlock(&hb2->lock);

			/*
			 * If we would have faulted, release mmap_sem, fault
			 * it in and start all over again.
			 */
			futex_unlock_mm(fshared);

			ret = get_user(curval, uaddr1);

			if (!ret)
				goto retry;

			return ret;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex (&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			/*
			 * If key1 and key2 hash to the same bucket, no need to
			 * requeue.
			 */
			if (likely(head1 != &hb2->chain)) {
				plist_del(&this->list, &hb1->chain);
				plist_add(&this->list, &hb2->chain);
				this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
				this->list.plist.lock = &hb2->lock;
#endif
			}
			this->key = key2;
			get_futex_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
		}
	}

out_unlock:
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);

	/* drop_futex_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out:
	futex_unlock_mm(fshared);
	return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	init_waitqueue_head(&q->waiters);

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket.  The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with the hash bucket lock held, and with mmap_sem
 * held for non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner,
				struct rw_semaphore *fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret, attempt = 0;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the
	 * pi_state because we can fault here. Imagine swapped out
	 * pages or a fork, which was running right before we acquired
	 * mmap_sem, that marked all the anonymous memory readonly for
	 * cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode whether the futex is shared in the 'flags' field:
 */
#define FLAGS_SHARED		1

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
		      u32 val, ktime_t *abs_time, u32 bitset)
{
	struct task_struct *curr = current;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;
	struct hrtimer_sleeper t;
	int rem = 0;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
retry:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

	hb = queue_lock(&q);

	/*
	 * Access the page AFTER the futex is queued.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		/*
		 * If we would have faulted, release mmap_sem, fault it in and
		 * start all over again.
		 */
		futex_unlock_mm(fshared);

		ret = get_user(uval, uaddr);

		if (!ret)
			goto retry;
		return ret;
	}
	ret = -EWOULDBLOCK;
	if (uval != val)
		goto out_unlock_release_sem;

	/* Only actually queue if *uaddr contained val. */
	queue_me(&q, hb);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	futex_unlock_mm(fshared);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiters, &wait);
	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q.list))) {
		if (!abs_time)
			schedule();
		else {
			hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
					      HRTIMER_MODE_ABS);
			hrtimer_init_sleeper(&t, current);
			t.timer.expires = *abs_time;

			hrtimer_start(&t.timer, t.timer.expires,
				      HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			/*
			 * the timer could have already expired, in which
			 * case current would be flagged for rescheduling.
			 * Don't bother calling schedule.
			 */
			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);

			/* Flag if a timeout occurred */
			rem = (t.task == NULL);

			destroy_hrtimer_on_stack(&t.timer);
		}
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	if (!unqueue_me(&q))
		return 0;
	if (rem)
		return -ETIMEDOUT;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	if (!abs_time)
		return -ERESTARTSYS;
	else {
		struct restart_block *restart;
		restart = &current_thread_info()->restart_block;
		restart->fn = futex_wait_restart;
		restart->futex.uaddr = (u32 *)uaddr;
		restart->futex.val = val;
		restart->futex.time = abs_time->tv64;
		restart->futex.bitset = bitset;
		restart->futex.flags = 0;

		if (fshared)
			restart->futex.flags |= FLAGS_SHARED;
		return -ERESTART_RESTARTBLOCK;
	}

out_unlock_release_sem:
	queue_unlock(&q, hb);

out_release_sem:
	futex_unlock_mm(fshared);
	return ret;
}
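
/*
 * Illustrative userspace counterpart to futex_wait()/futex_wake() - a
 * minimal (non-production) lock sketch, where 'f' is a shared u32
 * initialized to 0:
 *
 *	lock:	while (__sync_val_compare_and_swap(&f, 0, 1) != 0)
 *			syscall(SYS_futex, &f, FUTEX_WAIT, 1, NULL, NULL, 0);
 *	unlock:	f = 0;
 *		syscall(SYS_futex, &f, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * A production mutex adds a separate "locked, waiters present" state
 * so the unlock fast path can skip the syscall; see Ulrich Drepper's
 * "Futexes Are Tricky" for the full treatment.
 */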

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	struct rw_semaphore *fshared = NULL;
	ktime_t t;

	t.tv64 = restart->futex.time;
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = &current->mm->mmap_sem;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
				restart->futex.bitset);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
			 int detect, ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct task_struct *curr = current;
	struct futex_hash_bucket *hb;
	u32 uval, newval, curval;
	struct futex_q q;
	int ret, lock_taken, ownerdied = 0, attempt = 0;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		to->timer.expires = *time;
	}

	q.pi_state = NULL;
retry:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

retry_unlocked:
	hb = queue_lock(&q);

retry_locked:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(current);

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;

	/*
	 * Detect deadlocks. In case of REQUEUE_PI this is a valid
	 * situation and we return success to user space.
	 */
	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
		ret = -EDEADLK;
		goto out_unlock_release_sem;
	}

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		goto out_unlock_release_sem;

	uval = curval;

	/*
	 * Set the WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock:
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * A futex can end up with no owner (its TID field is 0) in two
	 * cases: the owner died and left OWNER_DIED set, or we spotted
	 * the OWNER_DIED bit on a previous pass (ownerdied).  In both
	 * cases we take the futex over unconditionally.
	 *
	 * This is safe as we are protected by the hash bucket lock!
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;
	if (unlikely(curval != uval))
		goto retry_locked;

	/*
	 * We took the lock due to owner-died take over.
	 */
	if (unlikely(lock_taken))
		goto out_unlock_release_sem;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);

	if (unlikely(ret)) {
		switch (ret) {

		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			futex_unlock_mm(fshared);
			cond_resched();
			goto retry;

		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				goto uaddr_faulted;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry_locked;
			}
		default:
			goto out_unlock_release_sem;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	futex_unlock_mm(fshared);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	futex_lock_mm(fshared);
	spin_lock(q.lock_ptr);

	if (!ret) {
		/*
		 * Got the lock. We might not be the anticipated owner
		 * if we did a lock-steal - fix up the PI-state in
		 * that case:
		 */
		if (q.pi_state->owner != curr)
			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
	} else {
		/*
		 * Catch the rare case where the lock was released
		 * while we were on our way back, before we locked
		 * the hash bucket.
		 */
		if (q.pi_state->owner == curr) {
			/*
			 * Try to get the rt_mutex now. This might
			 * fail as some other task acquired the
			 * rt_mutex after we removed ourselves from the
			 * rt_mutex waiters list.
			 */
			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
				ret = 0;
			else {
				/*
				 * pi_state is incorrect, some other
				 * task did a lock steal and we
				 * returned due to timeout or signal
				 * without taking the rt_mutex. Too
				 * late. We can access the
				 * rt_mutex_owner without locking, as
				 * the other task is now blocked on
				 * the hash bucket lock. Fix the state
				 * up.
				 */
				struct task_struct *owner;
				int res;

				owner = rt_mutex_owner(&q.pi_state->pi_mutex);
				res = fixup_pi_state_owner(uaddr, &q, owner,
							   fshared);

				/* propagate -EFAULT, if the fixup failed */
				if (res)
					ret = res;
			}
		} else {
			/*
			 * Paranoia check. If we did not take the lock
			 * in the trylock above, then we should not be
			 * the owner of the rtmutex, neither the real
			 * nor the pending one:
			 */
			if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
				printk(KERN_ERR "futex_lock_pi: ret = %d "
				       "pi-mutex: %p pi-state %p\n", ret,
				       q.pi_state->pi_mutex.owner,
				       q.pi_state->owner);
		}
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);
	futex_unlock_mm(fshared);

	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

out_unlock_release_sem:
	queue_unlock(&q, hb);

out_release_sem:
	futex_unlock_mm(fshared);
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret;

uaddr_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, but we can't modify it
	 * non-atomically.  Therefore, if get_user below is not
	 * enough, we need to handle the fault ourselves, while
	 * still holding the mmap_sem.
	 *
	 * ... and hb->lock. :-) --ANK
	 */
	queue_unlock(&q, hb);

	if (attempt++) {
		ret = futex_handle_fault((unsigned long)uaddr, fshared,
					 attempt);
		if (ret)
			goto out_release_sem;
		goto retry_unlocked;
	}

	futex_unlock_mm(fshared);

	ret = get_user(uval, uaddr);
	if (!ret && (uval != -EFAULT))
		goto retry;

	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        u32 uval;
        struct plist_head *head;
        union futex_key key;
        int ret, attempt = 0;

retry:
        if (get_user(uval, uaddr))
                return -EFAULT;
        /*
         * We release only a lock we actually own:
         */
        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                return -EPERM;
        /*
         * First take all the futex related locks:
         */
        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
retry_unlocked:
        spin_lock(&hb->lock);

        /*
         * To avoid races, try to do the TID -> 0 atomic transition
         * again. If it succeeds then we can return without waking
         * anyone else up:
         */
        if (!(uval & FUTEX_OWNER_DIED))
                uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);

        if (unlikely(uval == -EFAULT))
                goto pi_faulted;
        /*
         * Rare case: we managed to release the lock atomically,
         * no need to wake anyone else up:
         */
        if (unlikely(uval == task_pid_vnr(current)))
                goto out_unlock;

        /*
         * Ok, other tasks may need to be woken up - check waiters
         * and do the wakeup if necessary:
         */
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (!match_futex(&this->key, &key))
                        continue;
                ret = wake_futex_pi(uaddr, uval, this);
                /*
                 * The atomic access to the futex value
                 * generated a pagefault, so retry the
                 * user-access and the wakeup:
                 */
                if (ret == -EFAULT)
                        goto pi_faulted;
                goto out_unlock;
        }
        /*
         * No waiters - kernel unlocks the futex:
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                ret = unlock_futex_pi(uaddr, uval);
                if (ret == -EFAULT)
                        goto pi_faulted;
        }

out_unlock:
        spin_unlock(&hb->lock);
out:
        futex_unlock_mm(fshared);

        return ret;

pi_faulted:
        /*
         * We have to r/w *(int __user *)uaddr, but we can't modify it
         * non-atomically. Therefore, if get_user below is not
         * enough, we need to handle the fault ourselves, while
         * still holding the mmap_sem.
         *
         * ... and hb->lock. --ANK
         */
        spin_unlock(&hb->lock);

        if (attempt++) {
                ret = futex_handle_fault((unsigned long)uaddr, fshared,
                                         attempt);
                if (ret)
                        goto out;
                uval = 0;
                goto retry_unlocked;
        }

        futex_unlock_mm(fshared);

        ret = get_user(uval, uaddr);
        if (!ret && (uval != -EFAULT))
                goto retry;

        return ret;
}
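
/*
 * Illustrative sketch (not part of this file): the matching userspace
 * unlock fast path. If the word still holds exactly our TID (no waiter
 * bits set), one compare-and-swap releases the lock; otherwise the TID
 * -> 0 transition fails and the kernel must hand the lock to the top
 * waiter. pi_unlock() below is hypothetical example code:
 *
 *	static void pi_unlock(int *futex)
 *	{
 *		int tid = syscall(SYS_gettid);
 *
 *		if (__sync_val_compare_and_swap(futex, tid, 0) != tid)
 *			syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL);
 *	}
 */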

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
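
/*
 * For reference, the userspace side of this protocol (as declared in the
 * <linux/futex.h> uapi) looks like this; each lock record links into the
 * per-thread list, and futex_offset is the distance from a list entry to
 * the futex word it guards:
 *
 *	struct robust_list {
 *		struct robust_list *next;
 *	};
 *
 *	struct robust_list_head {
 *		struct robust_list list;
 *		long futex_offset;
 *		struct robust_list *list_op_pending;
 *	};
 */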

/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
                    size_t len)
{
        if (!futex_cmpxchg_enabled)
                return -ENOSYS;
        /*
         * The kernel knows only one size for now:
         */
        if (unlikely(len != sizeof(*head)))
                return -EINVAL;

        current->robust_list = head;

        return 0;
}
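
/*
 * Hypothetical userspace registration, done once per thread (typically
 * by the C library at thread start). The list begins empty, pointing
 * back at itself:
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next = &head.list;
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */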

/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
                    size_t __user *len_ptr)
{
        struct robust_list_head __user *head;
        unsigned long ret;

        if (!futex_cmpxchg_enabled)
                return -ENOSYS;

        if (!pid)
                head = current->robust_list;
        else {
                struct task_struct *p;

                ret = -ESRCH;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (!p)
                        goto err_unlock;
                ret = -EPERM;
                if ((current->euid != p->euid) && (current->euid != p->uid) &&
                    !capable(CAP_SYS_PTRACE))
                        goto err_unlock;
                head = p->robust_list;
                rcu_read_unlock();
        }

        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(head, head_ptr);

err_unlock:
        rcu_read_unlock();
        return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
        u32 uval, nval, mval;

retry:
        if (get_user(uval, uaddr))
                return -1;

        if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
                /*
                 * Ok, this dying thread is truly holding a futex
                 * of interest. Set the OWNER_DIED bit atomically
                 * via cmpxchg, and if the value had FUTEX_WAITERS
                 * set, wake up a waiter (if any). (We have to do a
                 * futex_wake() even if OWNER_DIED is already set -
                 * to handle the rare but possible case of recursive
                 * thread-death.) The rest of the cleanup is done in
                 * userspace.
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

                if (nval == -EFAULT)
                        return -1;

                if (nval != uval)
                        goto retry;

                /*
                 * Wake robust non-PI futexes here. The wakeup of
                 * PI futexes happens in exit_pi_state():
                 */
                if (!pi && (uval & FUTEX_WAITERS))
                        futex_wake(uaddr, &curr->mm->mmap_sem, 1,
                                   FUTEX_BITSET_MATCH_ANY);
        }
        return 0;
}
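
/*
 * Worked example of the transition above, using the <linux/futex.h> bit
 * layout (FUTEX_TID_MASK 0x3fffffff, FUTEX_OWNER_DIED 0x40000000,
 * FUTEX_WAITERS 0x80000000): a contended lock held by dying TID 1234
 * reads 0x800004d2; the cmpxchg replaces that with 0xc0000000, i.e. the
 * TID field is cleared while the waiters and owner-died bits are set, so
 * the next waiter both gets woken and sees that recovery is needed.
 */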

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
                                     struct robust_list __user * __user *head,
                                     int *pi)
{
        unsigned long uentry;

        if (get_user(uentry, (unsigned long __user *)head))
                return -EFAULT;

        *entry = (void __user *)(uentry & ~1UL);
        *pi = uentry & 1;

        return 0;
}
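
/*
 * Example of the encoding: list entries are word-aligned, so bit 0 of
 * the pointer is free to carry the PI flag. A raw value of 0x1001 thus
 * decodes to entry 0x1000 with *pi = 1, while 0x1000 decodes to the
 * same entry with *pi = 0.
 */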

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
        struct robust_list_head __user *head = curr->robust_list;
        struct robust_list __user *entry, *next_entry, *pending;
        unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
        unsigned long futex_offset;
        int rc;

        if (!futex_cmpxchg_enabled)
                return;

        /*
         * Fetch the list head (which was registered earlier, via
         * sys_set_robust_list()):
         */
        if (fetch_robust_entry(&entry, &head->list.next, &pi))
                return;
        /*
         * Fetch the relative futex offset:
         */
        if (get_user(futex_offset, &head->futex_offset))
                return;
        /*
         * Fetch any possibly pending lock-add first, and handle it
         * if it exists:
         */
        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
                return;

        next_entry = NULL;      /* avoid warning with gcc */
        while (entry != &head->list) {
                /*
                 * Fetch the next entry in the list before calling
                 * handle_futex_death:
                 */
                rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
                if (entry != pending)
                        if (handle_futex_death((void __user *)entry + futex_offset,
                                               curr, pi))
                                return;
                if (rc)
                        return;
                entry = next_entry;
                pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
                if (!--limit)
                        break;

                cond_resched();
        }

        if (pending)
                handle_futex_death((void __user *)pending + futex_offset,
                                   curr, pip);
}
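
/*
 * Note on the pointer arithmetic above: each robust_list entry is
 * embedded in a userspace lock record, and futex_offset is the (possibly
 * negative) distance from the entry to the futex word. For a
 * hypothetical record
 *
 *	struct my_robust_lock {
 *		struct robust_list list;
 *		int futex;
 *	};
 *
 * userspace would register futex_offset as
 * offsetof(struct my_robust_lock, futex) -
 * offsetof(struct my_robust_lock, list), letting the kernel derive the
 * futex address from a list entry alone.
 */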

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
              u32 __user *uaddr2, u32 val2, u32 val3)
{
        int ret = -ENOSYS;
        int cmd = op & FUTEX_CMD_MASK;
        struct rw_semaphore *fshared = NULL;
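
        /*
         * Private futexes are keyed on the current mm and virtual
         * address alone, so they can skip mmap_sem entirely; shared
         * ones pass it down so get_futex_key() can resolve the mapping
         * across processes.
         */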
        if (!(op & FUTEX_PRIVATE_FLAG))
                fshared = &current->mm->mmap_sem;

        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
                /* fall through */
        case FUTEX_WAIT_BITSET:
                ret = futex_wait(uaddr, fshared, val, timeout, val3);
                break;
        case FUTEX_WAKE:
                val3 = FUTEX_BITSET_MATCH_ANY;
                /* fall through */
        case FUTEX_WAKE_BITSET:
                ret = futex_wake(uaddr, fshared, val, val3);
                break;
        case FUTEX_REQUEUE:
                ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
                break;
        case FUTEX_CMP_REQUEUE:
                ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
                break;
        case FUTEX_WAKE_OP:
                ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
                break;
        case FUTEX_LOCK_PI:
                if (futex_cmpxchg_enabled)
                        ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
                break;
        case FUTEX_UNLOCK_PI:
                if (futex_cmpxchg_enabled)
                        ret = futex_unlock_pi(uaddr, fshared);
                break;
        case FUTEX_TRYLOCK_PI:
                if (futex_cmpxchg_enabled)
                        ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
                break;
        default:
                ret = -ENOSYS;
        }
        return ret;
}

asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
                          struct timespec __user *utime, u32 __user *uaddr2,
                          u32 val3)
{
        struct timespec ts;
        ktime_t t, *tp = NULL;
        u32 val2 = 0;
        int cmd = op & FUTEX_CMD_MASK;

        if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
                      cmd == FUTEX_WAIT_BITSET)) {
                if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
                        return -EFAULT;
                if (!timespec_valid(&ts))
                        return -EINVAL;

                t = timespec_to_ktime(ts);
                if (cmd == FUTEX_WAIT)
                        t = ktime_add_safe(ktime_get(), t);
                tp = &t;
        }
        /*
         * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
         * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
         */
        if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
            cmd == FUTEX_WAKE_OP)
                val2 = (u32) (unsigned long) utime;

        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
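
/*
 * Illustrative raw invocations of the syscall above (hypothetical
 * userspace code, assuming <linux/futex.h> and syscall(2)):
 *
 *	u32 word;
 *	struct timespec ts = { .tv_sec = 1 };
 *
 *	syscall(SYS_futex, &word, FUTEX_WAIT, 0, &ts, NULL, 0);
 *	syscall(SYS_futex, &word, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * Note how the fourth argument is a timeout for FUTEX_WAIT but is
 * reinterpreted as an integer count for the requeue and wake-op
 * commands, exactly as decoded above.
 */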

static int __init futex_init(void)
{
        u32 curval;
        int i;

        /*
         * This will fail, and we want it to. Some arch implementations
         * do runtime detection of the futex_atomic_cmpxchg_inatomic()
         * functionality. We want to know that before we call in any
         * of the complex code paths. Also we want to prevent
         * registration of robust lists in that case. NULL is
         * guaranteed to fault, so we get -EFAULT on a functional
         * implementation; the non-functional ones will return
         * -ENOSYS.
         */
        curval = cmpxchg_futex_value_locked(NULL, 0, 0);
        if (curval == -EFAULT)
                futex_cmpxchg_enabled = 1;

        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
                spin_lock_init(&futex_queues[i].lock);
        }

        return 0;
}
__initcall(futex_init);