/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiter, then make the second condition true.
 */
struct futex_q {
        struct plist_node list;
        /* There can only be a single waiter */
        wait_queue_head_t waiter;

        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;

        /* Key which the futex is hashed on: */
        union futex_key key;

        /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
        struct task_struct *task;

        /* Bitset for the optional bitmasked wakeup */
        u32 bitset;
};
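
/*
 * Illustration (not part of the original file): a minimal, hedged sketch of
 * the lock-free "am I woken?" test implied by the comment above. Stage one
 * of a wakeup empties q->list; stage two clears q->lock_ptr. A waiter that
 * reads them in this order never sees a spurious "still queued":
 *
 *        int woken = plist_node_empty(&q->list) || q->lock_ptr == NULL;
 *
 * futex_wait() below relies on exactly this: !plist_node_empty() is safe
 * without a lock, while testing q->lock_ptr alone would race with the
 * ordering in wake_futex().
 */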
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                atomic_inc(&key->shared.inode->i_count);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr) {
                /* If we're here then we tried to put a key we failed to get */
                WARN_ON_ONCE(1);
                return;
        }

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs
         * the virtual address, we don't even have to find the underlying vma.
         * Note: we do have to check that 'uaddr' is a valid user address,
         * but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
                get_futex_key_refs(key);
                return 0;
        }

again:
        err = get_user_pages_fast(address, 1, 0, &page);
        if (err < 0)
                return err;

        lock_page(page);
        if (!page->mapping) {
                unlock_page(page);
                put_page(page);
                goto again;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page)) {
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = page->mapping->host;
                key->shared.pgoff = page->index;
        }

        get_futex_key_refs(key);

        unlock_page(page);
        put_page(page);
        return 0;
}
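
/*
 * Illustration (not part of the original file): a hedged sketch of the
 * three key flavours computed above, following union futex_key from
 * <linux/futex.h>:
 *
 *        PROCESS_PRIVATE:      key.private = { .mm = current->mm,
 *                                              .address = page-aligned uaddr }
 *                              no flag bit in key.both.offset
 *        shared, anonymous:    same fields, FUT_OFF_MMSHARED set in
 *                              key.both.offset (mm refcount held)
 *        shared, file-backed:  key.shared = { .inode = mapping->host,
 *                                             .pgoff = page->index }
 *                              FUT_OFF_INODE set (inode refcount held)
 *
 * Because the futex address is u32-aligned, the two low bits of the
 * within-page offset are free to carry the flavour, so hash_futex() and
 * match_futex() can treat all keys uniformly.
 */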
static inline
void put_futex_key(int fshared, union futex_key *key)
{
        drop_futex_key_refs(key);
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}

/*
 * Fault handling.
 */
static int futex_handle_fault(unsigned long address, int attempt)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        int ret = -EFAULT;

        if (attempt > 2)
                return ret;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && address >= vma->vm_start &&
            (vma->vm_flags & VM_WRITE)) {
                int fault;
                fault = handle_mm_fault(mm, vma, address, 1);
                if (unlikely((fault & VM_FAULT_ERROR))) {
#if 0
                        /* XXX: let's do this when we verify it is OK */
                        if (ret & VM_FAULT_OOM)
                                ret = -ENOMEM;
#endif
                } else {
                        ret = 0;
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
                }
        }
        up_read(&mm->mmap_sem);
        return ret;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        pi_state->key = FUTEX_KEY_INIT;

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
        struct task_struct *p;
        const struct cred *cred = current_cred(), *pcred;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p) {
                p = ERR_PTR(-ESRCH);
        } else {
                pcred = __task_cred(p);
                if (cred->euid != pcred->euid &&
                    cred->euid != pcred->uid)
                        p = ERR_PTR(-ESRCH);
                else
                        get_task_struct(p);
        }

        rcu_read_unlock();

        return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
        }
        spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;

                        /*
                         * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));
                        WARN_ON(pid && pi_state->owner &&
                                pi_state->owner->pid != pid);

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /*
         * We need to look at the task state flags to figure out
         * whether the task is exiting. To protect against the do_exit
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
        spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        plist_del(&q->list, &q->list.plist);
        /*
         * The lock in wake_up() is a crucial memory barrier after the
         * plist_del() and also before assigning to q->lock_ptr.
         */
        wake_up(&q->waiter);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks. This must come last.
         *
         * A memory barrier is required here to prevent the following store to
         * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
         * end of wake_up() does not prevent this store from moving.
         */
        smp_wmb();
        q->lock_ptr = NULL;
}
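
/*
 * Illustration (not part of the original file): the wakeup side above and
 * the waiter's lock-free exit in unqueue_me() below form a small
 * publication protocol. A hedged sketch of the two sides:
 *
 *        waker (hb->lock held)              waiter (no lock)
 *        ---------------------              ----------------
 *        plist_del(&q->list, ...);          lock_ptr = q->lock_ptr;
 *        wake_up(&q->waiter);               barrier();
 *        smp_wmb();                         if (!lock_ptr)
 *        q->lock_ptr = NULL;                        return;  (already woken)
 *
 * The smp_wmb() guarantees that a waiter observing lock_ptr == NULL also
 * observes itself already unlinked from the plist, so it may free its
 * futex_q without taking any lock.
 */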
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * This happens when we have stolen the lock and the original
         * pending owner did not enqueue itself back on the rt_mutex.
         * That's not a tragedy: it just tells us that a lock waiter is
         * on the fly. We make the futex_q waiter the pending owner.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner-died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                int ret = 0;

                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
                        spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }

        spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        spin_unlock_irq(&pi_state->owner->pi_lock);

        spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        spin_unlock_irq(&new_owner->pi_lock);

        spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The owner-died
         * bit need not be preserved here: we are the owner.
         */
        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_unlock(&hb2->lock);
                if (hb1 < hb2)
                        spin_unlock(&hb1->lock);
        } else { /* hb1 > hb2 */
                spin_unlock(&hb1->lock);
                spin_unlock(&hb2->lock);
        }
}
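
/*
 * Illustration (not part of the original file): why double_lock_hb()
 * orders by bucket address. Without the ordering, two tasks operating on
 * the same pair of buckets in opposite directions could deadlock:
 *
 *        task A: double_lock_hb(hb_x, hb_y)   takes hb_x, waits for hb_y
 *        task B: double_lock_hb(hb_y, hb_x)   takes hb_y, waits for hb_x
 *
 * With the 'hb1 <= hb2' comparison, both tasks acquire min(hb_x, hb_y)
 * first, so the classic ABBA inversion cannot occur, and lockdep sees one
 * annotated nesting level (SINGLE_DEPTH_NESTING) instead of a false cycle.
 */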
/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        if (!bitset)
                return -EINVAL;

        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex (&this->key, &key)) {
                        if (this->pi_state) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
        put_futex_key(fshared, &key);
out:
        return ret;
}
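
/*
 * Illustration (not part of the original file): a hedged userspace sketch
 * of the bitset filtering handled above. Two waiters register different
 * bits; a waker can then target one class of waiters. Uses the raw syscall
 * interface; 'word' is a hypothetical 32-bit futex variable:
 *
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/futex.h>
 *
 *        int word;
 *
 *        // waiter A, interested in bit 0x1:
 *        syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, 0, NULL, NULL, 0x1);
 *        // waiter B, interested in bit 0x2:
 *        syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, 0, NULL, NULL, 0x2);
 *        // wake only waiters whose bitset intersects 0x2 (waiter B):
 *        syscall(SYS_futex, &word, FUTEX_WAKE_BITSET, 1, NULL, NULL, 0x2);
 *
 * Plain FUTEX_WAIT/FUTEX_WAKE behave as if the bitset were all ones
 * (FUTEX_BITSET_MATCH_ANY).
 */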
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret, attempt = 0;

retryfull:
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry:
        double_lock_hb(hb1, hb2);

        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
                u32 dummy;

                double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out_put_keys;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out_put_keys;
                }

                /*
                 * futex_atomic_op_inuser needs to both read and write
                 * *(int __user *)uaddr2, but we can't modify it
                 * non-atomically. Therefore, if get_user below is not
                 * enough, we need to handle the fault ourselves, while
                 * still holding the mmap_sem.
                 */
                if (attempt++) {
                        ret = futex_handle_fault((unsigned long)uaddr2,
                                                 attempt);
                        if (ret)
                                goto out_put_keys;
                        goto retry;
                }

                ret = get_user(dummy, uaddr2);
                if (ret)
                        goto out_put_keys;

                put_futex_key(fshared, &key2);
                put_futex_key(fshared, &key1);
                goto retryfull;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex (&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex (&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        double_unlock_hb(hb1, hb2);
out_put_keys:
        put_futex_key(fshared, &key2);
out_put_key1:
        put_futex_key(fshared, &key1);
out:
        return ret;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
                         int nr_wake, int nr_requeue, u32 *cmpval)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;

retry:
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        double_unlock_hb(hb1, hb2);
                        put_futex_key(fshared, &key2);
                        put_futex_key(fshared, &key1);

                        ret = get_user(curval, uaddr1);

                        if (!ret)
                                goto retry;

                        goto out_put_keys;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex (&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
                        if (likely(head1 != &hb2->chain)) {
                                plist_del(&this->list, &hb1->chain);
                                plist_add(&this->list, &hb2->chain);
                                this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                                this->list.plist.lock = &hb2->lock;
#endif
                        }
                        this->key = key2;
                        get_futex_key_refs(&key2);
                        drop_count++;

                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }

out_unlock:
        double_unlock_hb(hb1, hb2);

        /* drop_futex_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out_put_keys:
        put_futex_key(fshared, &key2);
out_put_key1:
        put_futex_key(fshared, &key1);
out:
        return ret;
}
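
/*
 * Illustration (not part of the original file): the classic consumer of
 * futex_requeue() is a condition-variable broadcast, which wakes one
 * waiter and requeues the rest onto the mutex word instead of creating a
 * thundering herd. A hedged userspace sketch with the raw syscall
 * interface; 'cond' and 'mutex' are hypothetical 32-bit futex words and
 * 'seq' the sequence value the caller last read from 'cond':
 *
 *        #include <limits.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/futex.h>
 *
 *        syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE,
 *                1,                      // nr_wake: wake one waiter
 *                (void *)INT_MAX,        // nr_requeue (passed in the
 *                                        // timeout slot by convention)
 *                &mutex, seq);           // uaddr2, cmpval
 *
 * If 'cond' no longer contains 'seq', the call fails with EAGAIN and the
 * caller re-reads the sequence, mirroring the cmpval check above.
 */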
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        init_waitqueue_head(&q->waiter);

        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.lock = &hb->lock;
#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
        spin_unlock(&hb->lock);
        drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once. They are called with the hashed spinlock held.
 */

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock. This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock(). It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock(). It cannot,
                 * however, change back to the original value. Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(plist_node_empty(&q->list));
                plist_del(&q->list, &q->list.plist);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);

        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
        q->pi_state = NULL;

        spin_unlock(q->lock_ptr);

        drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                                struct task_struct *newowner, int fshared)
{
        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        struct task_struct *oldowner = pi_state->owner;
        u32 uval, curval, newval;
        int ret, attempt = 0;

        /* Owner died? */
        if (!pi_state->owner)
                newtid |= FUTEX_OWNER_DIED;

        /*
         * We are here either because we stole the rtmutex from the
         * pending owner or we are the pending owner which failed to
         * get the rtmutex. We have to replace the pending owner TID
         * in the user space variable. This must be atomic as we have
         * to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
         * because we can fault here. Imagine swapped out pages or a fork
         * that marked all the anonymous memory readonly for cow.
         *
         * Modifying pi_state _before_ the user space value would
         * leave the pi_state in an inconsistent state when we fault
         * here, because we need to drop the hash bucket lock to
         * handle the fault. This might be observed in the PID check
         * in lookup_pi_state.
         */
retry:
        if (get_futex_value_locked(&uval, uaddr))
                goto handle_fault;

        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        goto handle_fault;
                if (curval == uval)
                        break;
                uval = curval;
        }

        /*
         * We fixed up user space. Now we need to fix the pi_state
         * itself.
         */
        if (pi_state->owner != NULL) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);
        }

        pi_state->owner = newowner;

        spin_lock_irq(&newowner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &newowner->pi_state_list);
        spin_unlock_irq(&newowner->pi_lock);
        return 0;

        /*
         * To handle the page fault we need to drop the hash bucket
         * lock here. That gives the other task (either the pending
         * owner itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
         * do another fixup. When the fixup has been done already we
         * simply return.
         */
handle_fault:
        spin_unlock(q->lock_ptr);

        ret = futex_handle_fault((unsigned long)uaddr, attempt++);

        spin_lock(q->lock_ptr);

        /*
         * Check if someone else fixed it for us:
         */
        if (pi_state->owner != oldowner)
                return 0;

        if (ret)
                return ret;

        goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode in the 'flags' shared capability
 */
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, int fshared,
                      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
        struct task_struct *curr = current;
        struct restart_block *restart;
        DECLARE_WAITQUEUE(wait, curr);
        struct futex_hash_bucket *hb;
        struct futex_q q;
        u32 uval;
        int ret;
        struct hrtimer_sleeper t;
        int rem = 0;

        if (!bitset)
                return -EINVAL;

        q.pi_state = NULL;
        q.bitset = bitset;
retry:
        q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out;

        hb = queue_lock(&q);

        /*
         * Access the page AFTER the hash-bucket is locked.
         * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
         * any cond. If we queued after testing *uaddr, that would open
         * a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
         * A consequence is that futex_wait() can return zero and absorb
         * a wakeup when *uaddr != val on entry to the syscall. This is
         * rare, but normal.
         *
         * For shared futexes, we hold the mmap semaphore, so the mapping
         * cannot have changed since we looked it up in get_futex_key.
         */
        ret = get_futex_value_locked(&uval, uaddr);

        if (unlikely(ret)) {
                queue_unlock(&q, hb);
                put_futex_key(fshared, &q.key);

                ret = get_user(uval, uaddr);

                if (!ret)
                        goto retry;
                goto out;
        }
        ret = -EWOULDBLOCK;
        if (unlikely(uval != val)) {
                queue_unlock(&q, hb);
                goto out_put_key;
        }

        /* Only actually queue if *uaddr contained val. */
        queue_me(&q, hb);

        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
         * faults, and we cannot just set TASK_INTERRUPTIBLE state when
         * queueing ourselves into the futex hash. This code thus has to
         * rely on the futex_wake() code removing us from hash when it
         * wakes us up.
         */

        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&q.waiter, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (likely(!plist_node_empty(&q.list))) {
                if (!abs_time)
                        schedule();
                else {
                        hrtimer_init_on_stack(&t.timer,
                                              clockrt ? CLOCK_REALTIME :
                                              CLOCK_MONOTONIC,
                                              HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        hrtimer_set_expires_range_ns(&t.timer, *abs_time,
                                                     current->timer_slack_ns);

                        hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
                        if (!hrtimer_active(&t.timer))
                                t.task = NULL;

                        /*
                         * the timer could have already expired, in which
                         * case current would be flagged for rescheduling.
                         * Don't bother calling schedule.
                         */
                        if (likely(t.task))
                                schedule();

                        hrtimer_cancel(&t.timer);

                        /* Flag if a timeout occurred */
                        rem = (t.task == NULL);

                        destroy_hrtimer_on_stack(&t.timer);
                }
        }
        __set_current_state(TASK_RUNNING);

        /*
         * NOTE: we don't remove ourselves from the waitqueue because
         * we are the only user of it.
         */

        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
        if (!unqueue_me(&q))
                goto out_put_key;
        ret = -ETIMEDOUT;
        if (rem)
                goto out_put_key;

        /*
         * We expect signal_pending(current), but another thread may
         * have handled it for us already.
         */
        ret = -ERESTARTSYS;
        if (!abs_time)
                goto out_put_key;

        restart = &current_thread_info()->restart_block;
        restart->fn = futex_wait_restart;
        restart->futex.uaddr = (u32 *)uaddr;
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
        restart->futex.flags = 0;

        if (fshared)
                restart->futex.flags |= FLAGS_SHARED;
        if (clockrt)
                restart->futex.flags |= FLAGS_CLOCKRT;

        ret = -ERESTART_RESTARTBLOCK;

out_put_key:
        put_futex_key(fshared, &q.key);
out:
        return ret;
}
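
/*
 * Illustration (not part of the original file): a hedged userspace sketch
 * of the waiter/waker protocol that the ordering comment in futex_wait()
 * describes. Uses the raw syscall interface; 'var' is a hypothetical
 * 32-bit futex word:
 *
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/futex.h>
 *
 *        volatile int var;
 *
 *        void waiter(void)
 *        {
 *                // Block only while var is still 0. The kernel re-checks
 *                // var == 0 under the bucket lock, so a wakeup between
 *                // this load and the syscall cannot be lost; it just
 *                // makes FUTEX_WAIT return EWOULDBLOCK.
 *                while (var == 0)
 *                        syscall(SYS_futex, &var, FUTEX_WAIT, 0,
 *                                NULL, NULL, 0);
 *        }
 *
 *        void waker(void)
 *        {
 *                var = 1;                                    // var = new
 *                syscall(SYS_futex, &var, FUTEX_WAKE, 1,     // wake one
 *                        NULL, NULL, 0);
 *        }
 */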
static long futex_wait_restart(struct restart_block *restart)
{
        u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
        int fshared = 0;
        ktime_t t;

        t.tv64 = restart->futex.time;
        restart->fn = do_no_restart_syscall;
        if (restart->futex.flags & FLAGS_SHARED)
                fshared = 1;
        return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
                                restart->futex.bitset,
                                restart->futex.flags & FLAGS_CLOCKRT);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, int fshared,
                         int detect, ktime_t *time, int trylock)
{
        struct hrtimer_sleeper timeout, *to = NULL;
        struct task_struct *curr = current;
        struct futex_hash_bucket *hb;
        u32 uval, newval, curval;
        struct futex_q q;
        int ret, lock_taken, ownerdied = 0, attempt = 0;

        if (refill_pi_state_cache())
                return -ENOMEM;

        if (time) {
                to = &timeout;
                hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
                                      HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
                hrtimer_set_expires(&to->timer, *time);
        }

        q.pi_state = NULL;
retry:
        q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out;

retry_unlocked:
        hb = queue_lock(&q);

retry_locked:
        ret = lock_taken = 0;

        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
        newval = task_pid_vnr(current);

        curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;

        /*
         * Detect deadlocks. In case of REQUEUE_PI this is a valid
         * situation and we return success to user space.
         */
        if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
                ret = -EDEADLK;
                goto out_unlock_put_key;
        }

        /*
         * Surprise - we got the lock. Just return to userspace:
         */
        if (unlikely(!curval))
                goto out_unlock_put_key;

        uval = curval;

        /*
         * Set the WAITERS flag, so the owner will know it has someone
         * to wake at the next unlock.
         */
        newval = curval | FUTEX_WAITERS;

        /*
         * A futex might have no owner (the owner TID is 0) in two
         * cases: the previous owner's exit cleanup cleared the TID, or
         * we spotted the OWNER_DIED bit and set 'ownerdied' below. In
         * both cases we take over the futex unconditionally.
         *
         * This is safe as we are protected by the hash bucket lock !
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
                newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
                ownerdied = 0;
                lock_taken = 1;
        }

        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
        if (unlikely(curval != uval))
                goto retry_locked;

        /*
         * We took the lock due to owner died take over.
         */
        if (unlikely(lock_taken))
                goto out_unlock_put_key;

        /*
         * We don't have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
        ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);

        if (unlikely(ret)) {
                switch (ret) {

                case -EAGAIN:
                        /*
                         * Task is exiting and we just wait for the
                         * exit to complete.
                         */
                        queue_unlock(&q, hb);
                        put_futex_key(fshared, &q.key);
                        cond_resched();
                        goto retry;

                case -ESRCH:
                        /*
                         * No owner found for this futex. Check if the
                         * OWNER_DIED bit is set to figure out whether
                         * this is a robust futex or not.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                goto uaddr_faulted;

                        /*
                         * We simply start over in case of a robust
                         * futex. The code above will take the futex
                         * and return happy.
                         */
                        if (curval & FUTEX_OWNER_DIED) {
                                ownerdied = 1;
                                goto retry_locked;
                        }
                default:
                        goto out_unlock_put_key;
                }
        }

        /*
         * Only actually queue now that the atomic ops are done:
         */
        queue_me(&q, hb);

        WARN_ON(!q.pi_state);
        /*
         * Block on the PI mutex:
         */
        if (!trylock)
                ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
        else {
                ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
                /* Fixup the trylock return value: */
                ret = ret ? 0 : -EWOULDBLOCK;
        }

        spin_lock(q.lock_ptr);

        if (!ret) {
                /*
                 * Got the lock. We might not be the anticipated owner
                 * if we did a lock-steal - fix up the PI-state in
                 * that case:
                 */
                if (q.pi_state->owner != curr)
                        ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
        } else {
                /*
                 * Catch the rare case, where the lock was released
                 * when we were on the way back before we locked the
                 * hash bucket.
                 */
                if (q.pi_state->owner == curr) {
                        /*
                         * Try to get the rt_mutex now. This might
                         * fail as some other task acquired the
                         * rt_mutex after we removed ourselves from the
                         * rt_mutex waiters list.
                         */
                        if (rt_mutex_trylock(&q.pi_state->pi_mutex))
                                ret = 0;
                        else {
                                /*
                                 * pi_state is incorrect, some other
                                 * task did a lock steal and we
                                 * returned due to timeout or signal
                                 * without taking the rt_mutex. Too
                                 * late. We can access the
                                 * rt_mutex_owner without locking, as
                                 * the other task is now blocked on
                                 * the hash bucket lock. Fix the state
                                 * up.
                                 */
                                struct task_struct *owner;
                                int res;

                                owner = rt_mutex_owner(&q.pi_state->pi_mutex);
                                res = fixup_pi_state_owner(uaddr, &q, owner,
                                                           fshared);

                                /* propagate -EFAULT, if the fixup failed */
                                if (res)
                                        ret = res;
                        }
                } else {
                        /*
                         * Paranoia check. If we did not take the lock
                         * in the trylock above, then we should not be
                         * the owner of the rtmutex, neither the real
                         * nor the pending one:
                         */
                        if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
                                printk(KERN_ERR "futex_lock_pi: ret = %d "
                                       "pi-mutex: %p pi-state %p\n", ret,
                                       q.pi_state->pi_mutex.owner,
                                       q.pi_state->owner);
                }
        }

        /*
         * If fixup_pi_state_owner() faulted and was unable to handle the
         * fault, unlock it and return the fault to userspace.
         */
        if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
                rt_mutex_unlock(&q.pi_state->pi_mutex);

        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);

        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret != -EINTR ? ret : -ERESTARTNOINTR;

out_unlock_put_key:
        queue_unlock(&q, hb);

out_put_key:
        put_futex_key(fshared, &q.key);
out:
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret;

uaddr_faulted:
        /*
         * We have to r/w *(int __user *)uaddr, and we have to modify it
         * atomically. Therefore, if we continue to fault after get_user()
         * below, we need to handle the fault ourselves, while still holding
         * the mmap_sem. This can occur if the uaddr is under contention as
         * we have to drop the mmap_sem in order to call get_user().
         */
        queue_unlock(&q, hb);

        if (attempt++) {
                ret = futex_handle_fault((unsigned long)uaddr, attempt);
                if (ret)
                        goto out_put_key;
                goto retry_unlocked;
        }

        ret = get_user(uval, uaddr);
        if (!ret)
                goto retry_unlocked;

        goto out_put_key;
}
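
/*
 * Illustration (not part of the original file): a hedged sketch of the
 * userspace fast path that futex_lock_pi()/futex_unlock_pi() back up.
 * Only a contended cmpxchg enters the kernel; 'lock' is a hypothetical
 * 32-bit futex word holding the owner's TID, and 'my_tid' the caller's
 * TID as obtained via syscall(SYS_gettid):
 *
 *        // lock fast path: 0 -> TID, uncontended, no syscall
 *        if (__sync_val_compare_and_swap(&lock, 0, my_tid) != 0)
 *                // slow path: the kernel queues us and does PI boosting
 *                syscall(SYS_futex, &lock, FUTEX_LOCK_PI, 0,
 *                        NULL, NULL, 0);
 *
 *        // unlock fast path: TID -> 0; fails if FUTEX_WAITERS is set,
 *        // in which case the kernel must hand the lock to a waiter
 *        if (!__sync_bool_compare_and_swap(&lock, my_tid, 0))
 *                syscall(SYS_futex, &lock, FUTEX_UNLOCK_PI, 0,
 *                        NULL, NULL, 0);
 */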
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, int fshared)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret, attempt = 0;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
		return -EPERM;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
retry_unlocked:
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED))
		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);

	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == task_pid_vnr(current)))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

out:
	return ret;

pi_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, and we have to modify it
	 * atomically. Therefore, if we continue to fault after get_user()
	 * below, we need to handle the fault ourselves, while still holding
	 * the mmap_sem. This can occur if the uaddr is under contention as
	 * we have to drop the mmap_sem in order to call get_user().
	 */
	spin_unlock(&hb->lock);

	if (attempt++) {
		ret = futex_handle_fault((unsigned long)uaddr, attempt);
		if (ret)
			goto out;
		uval = 0;
		goto retry_unlocked;
	}

	ret = get_user(uval, uaddr);
	put_futex_key(fshared, &key);
	if (!ret)
		goto retry;

	return ret;
}
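/*
 * The matching userspace unlock - a sketch with a hypothetical
 * pi_unlock() helper, assuming the includes of the previous sketch.
 * The TID -> 0 transition is tried in userspace first; only if the
 * kernel set FUTEX_WAITERS (or FUTEX_OWNER_DIED) does the thread
 * fall into the futex_unlock_pi() slowpath above.
 */
#if 0	/* userspace example, not built */
static void pi_unlock(unsigned int *futex_word)
{
	unsigned int tid = syscall(SYS_gettid);

	/* Uncontended case: TID -> 0, nobody to wake. */
	if (__sync_bool_compare_and_swap(futex_word, tid, 0))
		return;

	/* Waiters are queued in the kernel: let it wake the top one. */
	syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif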
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
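/*
 * What the userspace side of this contract can look like - a minimal
 * sketch using the uapi definitions from <linux/futex.h>. The struct
 * robust_mutex type and its layout are hypothetical; only the
 * robust_list/robust_list_head types and the futex_offset convention
 * are fixed by the ABI. In real code the head would be per-thread
 * (e.g. __thread), registered once via set_robust_list().
 */
#if 0	/* userspace example, not built */
#include <linux/futex.h>
#include <stddef.h>

struct robust_mutex {
	struct robust_list list;	/* node on the per-thread list */
	unsigned int futex;		/* futex word: owner TID + flag bits */
};

static struct robust_list_head head = {
	.list		 = { &head.list },	/* empty list points to itself */
	.futex_offset	 = offsetof(struct robust_mutex, futex)
			   - offsetof(struct robust_mutex, list),
	.list_op_pending = NULL,
};
#endif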
/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}
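/*
 * Registration from userspace (sketch): this is done once per thread,
 * typically by the C library at thread start-up. There is no libc
 * wrapper for the syscall, so raw syscall(2) is used. Assumes the
 * definitions from the previous sketch.
 */
#if 0	/* userspace example, not built */
#include <sys/syscall.h>
#include <unistd.h>

static void register_robust_list(struct robust_list_head *head)
{
	/* len is validated against sizeof(*head) by the kernel above */
	syscall(SYS_set_robust_list, head, sizeof(*head));
}
#endif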
/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	const struct cred *cred = current_cred(), *pcred;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		rcu_read_unlock();
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
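/*
 * Read-back example (sketch): a debugger or diagnostic tool can ask
 * where another thread registered its list head. The helper name is
 * hypothetical.
 */
#if 0	/* userspace example, not built */
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static void dump_robust_list_head(pid_t tid)
{
	struct robust_list_head *head;
	size_t len;

	if (syscall(SYS_get_robust_list, tid, &head, &len) == 0)
		printf("robust list head: %p, len %zu\n", (void *)head, len);
}
#endif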
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, nval, mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

		if (nval == -EFAULT)
			return -1;

		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
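/*
 * What the surviving side observes - a simplified sketch, assuming
 * the includes of the earlier sketches. After the transition above,
 * the futex word holds (old & FUTEX_WAITERS) | FUTEX_OWNER_DIED and
 * its TID bits are zero, so a hypothetical robust_trylock() can
 * claim the lock and report that recovery is needed, much like
 * pthread's EOWNERDEAD.
 */
#if 0	/* userspace example, not built */
#include <errno.h>

static int robust_trylock(unsigned int *futex_word)
{
	unsigned int tid = syscall(SYS_gettid);
	unsigned int old = __sync_val_compare_and_swap(futex_word, 0, tid);

	if (old == 0)
		return 0;		/* clean acquisition */

	if ((old & FUTEX_OWNER_DIED) &&
	    __sync_bool_compare_and_swap(futex_word, old,
					 tid | FUTEX_OWNER_DIED))
		return EOWNERDEAD;	/* locked, protected state suspect */

	return EBUSY;			/* held by a live owner */
}
#endif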
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
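/*
 * The encode side of the bit-0 convention (sketch): userspace tags
 * PI entries when linking them into the list, which is what
 * fetch_robust_entry() strips off and reports via *pi. The helper
 * name is hypothetical.
 */
#if 0	/* userspace example, not built */
#include <stdint.h>

static struct robust_list *encode_robust_entry(struct robust_list *entry,
					       int is_pi)
{
	return (struct robust_list *)((uintptr_t)entry | (is_pi ? 1UL : 0UL));
}
#endif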
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}
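/*
 * The userspace ordering that makes the list_op_pending handling
 * above sound - a sketch with hypothetical helpers, assuming the
 * struct robust_mutex type from the earlier sketch; error handling
 * and the actual futex acquisition are elided. The entry is
 * published in list_op_pending *before* the atomic acquire, so a
 * death between the two steps is still seen by exit_robust_list().
 */
#if 0	/* userspace example, not built */
static void robust_mutex_lock(struct robust_mutex *m,
			      struct robust_list_head *head)
{
	head->list_op_pending = &m->list;

	/* ... acquire m->futex (0 -> TID cmpxchg, FUTEX_WAIT loop) ... */

	/* enqueue at the head, then retire the pending slot */
	m->list.next = head->list.next;
	head->list.next = &m->list;
	head->list_op_pending = NULL;
}
#endif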
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int clockrt, ret = -ENOSYS;
	int cmd = op & FUTEX_CMD_MASK;
	int fshared = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		fshared = 1;

	clockrt = op & FUTEX_CLOCK_REALTIME;
	if (clockrt && cmd != FUTEX_WAIT_BITSET)
		return -ENOSYS;

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, fshared, val, val3);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_unlock_pi(uaddr, fshared);
		break;
	case FUTEX_TRYLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}
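/*
 * The classic wait/wake pair as issued from userspace (sketch; the
 * helper names are hypothetical). Note how FUTEX_WAIT above is
 * mapped onto FUTEX_WAIT_BITSET with FUTEX_BITSET_MATCH_ANY by the
 * fall-through.
 */
#if 0	/* userspace example, not built */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void futex_wait_on(unsigned int *word, unsigned int expected)
{
	/* sleeps only while *word still equals 'expected' */
	syscall(SYS_futex, word, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static void futex_wake_one(unsigned int *word)
{
	syscall(SYS_futex, word, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif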
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
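/*
 * Timeout convention (sketch, assuming the includes of the previous
 * sketch): FUTEX_WAIT takes a *relative* timespec, which the syscall
 * entry above turns into an absolute expiry via
 * ktime_add_safe(ktime_get(), t); FUTEX_WAIT_BITSET passes the
 * caller's timespec through as an absolute time.
 */
#if 0	/* userspace example, not built */
#include <time.h>

static int futex_wait_1s(unsigned int *word, unsigned int expected)
{
	struct timespec rel = { .tv_sec = 1, .tv_nsec = 0 };

	/* give up after one second, relative to now */
	return syscall(SYS_futex, word, FUTEX_WAIT, expected, &rel, NULL, 0);
}
#endif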
static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non functional ones will return
	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
	if (curval == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);