/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <asm/futex.h>

#include "rtmutex_common.h"

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futexes are matched on equal values of this key.
 * The key type depends on whether it's a shared or private mapping.
 * Don't rearrange members without looking at hash_futex().
 *
 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
 * We set bit 0 to indicate if it's an inode-based key.
 */
union futex_key {
	struct {
		unsigned long pgoff;
		struct inode *inode;
		int offset;
	} shared;
	struct {
		unsigned long address;
		struct mm_struct *mm;
		int offset;
	} private;
	struct {
		unsigned long word;
		void *ptr;
		int offset;
	} both;
};
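/*
 * Illustrative key layout (a sketch based on get_futex_key() below):
 * for a private futex at uaddr the key is effectively
 *
 *	key.private = {
 *		.address = uaddr & PAGE_MASK,
 *		.mm      = current->mm,
 *		.offset  = uaddr & ~PAGE_MASK,	(bit 0 stays clear)
 *	};
 *
 * while hash_futex() and match_futex() read the very same bytes
 * through the type-blind 'both' view.
 */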
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
	struct list_head list;
	wait_queue_head_t waiters;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* For fd, sigio sent using these: */
	int fd;
	struct file *filp;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;
};
/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct list_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Get parameters which are the keys for a futex.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * Returns: 0, or negative error code.
 * The key words are stored in *key on success.
 *
 * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
 */
static int get_futex_key(u32 __user *uaddr, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((key->both.offset % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * The futex is hashed differently depending on whether
	 * it's in a shared or private mapping.  So check vma first.
	 */
	vma = find_extend_vma(mm, address);
	if (unlikely(!vma))
		return -EFAULT;

	/*
	 * Permissions.
	 */
	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.  Therefore we use
	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
	 * mappings of _writable_ handles.
	 */
	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}

	/*
	 * Linear file mappings are also simple.
	 */
	key->shared.inode = vma->vm_file->f_dentry->d_inode;
	key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
				     + vma->vm_pgoff);
		return 0;
	}

	/*
	 * We could walk the page table to read the non-linear
	 * pte, and get the page index without fetching the page
	 * from swap.  But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
	 */
	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);
		return 0;
	}
	return err;
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
 * function, if it is called at all.  mmap_sem keeps key->shared.inode valid.
 */
static inline void get_key_refs(union futex_key *key)
{
	if (key->both.ptr != 0) {
		if (key->both.offset & 1)
			atomic_inc(&key->shared.inode->i_count);
		else
			atomic_inc(&key->private.mm->mm_count);
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_key_refs(union futex_key *key)
{
	if (key->both.ptr != 0) {
		if (key->both.offset & 1)
			iput(key->shared.inode);
		else
			mmdrop(key->private.mm);
	}
}

static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	inc_preempt_count();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	dec_preempt_count();

	return ret ? -EFAULT : 0;
}

/*
 * Fault handling. Called with current->mm->mmap_sem held.
 */
static int futex_handle_fault(unsigned long address, int attempt)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;

	if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
	    vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
		return -EFAULT;

	switch (handle_mm_fault(mm, vma, address, 1)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	default:
		return -EFAULT;
	}
	return 0;
}

/*
 * PI code:
 */
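/*
 * refill_pi_state_cache() pre-allocates one pi_state per task before
 * any hash-bucket lock is taken: alloc_pi_state() runs with the bucket
 * spinlock held (via lookup_pi_state()), so it must never sleep in
 * kmalloc().
 */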
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	memset(pi_state, 0, sizeof(*pi_state));
	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p)
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid)) {
		p = NULL;
		goto out_unlock;
	}
	if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
		p = NULL;
		goto out_unlock;
	}
	get_task_struct(p);
out_unlock:
	read_unlock(&tasklist_lock);

	return p;
}
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct list_head *head;
	struct task_struct *p;
	pid_t pid;

	head = &hb->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &me->key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			atomic_inc(&pi_state->refcount);
			me->pi_state = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and
	 * attach the new pi_state to it:
	 */
	pid = uval & FUTEX_TID_MASK;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = me->key;

	spin_lock_irq(&p->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	me->pi_state = pi_state;

	return 0;
}
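/*
 * For reference, the value of a PI/robust futex word decomposes as
 * (assuming the <linux/futex.h> definitions of this era):
 *
 *	bits 0-29  FUTEX_TID_MASK   - TID of the current owner
 *	bit  30    FUTEX_OWNER_DIED - owner exited without unlocking
 *	bit  31    FUTEX_WAITERS    - kernel-side waiters exist
 */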
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	list_del_init(&q->list);
	if (q->filp)
		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
	/*
	 * The lock in wake_up_all() is a crucial memory barrier after the
	 * list_del_init() and also before assigning to q->lock_ptr.
	 */
	wake_up_all(&q->waiters);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store
	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
	 * at the end of wake_up_all() does not prevent this store from
	 * moving.
	 */
	wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy: this way we know that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;

	inc_preempt_count();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	dec_preempt_count();

	if (curval == -EFAULT)
		return -EFAULT;
	if (curval != uval)
		return -EINVAL;

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit need not be preserved here. We are the owner:
	 */
	inc_preempt_count();
	oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
	dec_preempt_count();

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
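/*
 * Taking the two bucket locks in ascending address order avoids ABBA
 * deadlocks between concurrent two-futex operations running in
 * opposite directions. No ordered unlock helper is needed; callers
 * simply do:
 *
 *	spin_unlock(&hb1->lock);
 *	if (hb1 != hb2)
 *		spin_unlock(&hb2->lock);
 */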
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int futex_wake(u32 __user *uaddr, int nr_wake)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct list_head *head;
	union futex_key key;
	int ret;

	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * Perform an atomic operation on the futex value at uaddr2, then wake
 * up to nr_wake waiters on uaddr1 and, if the result of the operation
 * says so, up to nr_wake2 waiters on uaddr2:
 */
static int
futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct list_head *head;
	struct futex_q *this, *next;
	int ret, op_ret, attempt = 0;

retryfull:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr1, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry:
	double_lock_hb(hb1, hb2);

	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		spin_unlock(&hb1->lock);
		if (hb1 != hb2)
			spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out;
		}

		/*
		 * futex_atomic_op_inuser needs to both read and write
		 * *(int __user *)uaddr2, but we can't modify it
		 * non-atomically.  Therefore, if get_user below is not
		 * enough, we need to handle the fault ourselves, while
		 * still holding the mmap_sem.
		 */
		if (attempt++) {
			if (futex_handle_fault((unsigned long)uaddr2,
					       attempt))
				goto out;
			goto retry;
		}

		/*
		 * If we would have faulted, release mmap_sem,
		 * fault it in and start all over again.
		 */
		up_read(&current->mm->mmap_sem);

		ret = get_user(dummy, uaddr2);
		if (ret)
			return ret;

		goto retryfull;
	}

	head = &hb1->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		list_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}
/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct list_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr1, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			spin_unlock(&hb1->lock);
			if (hb1 != hb2)
				spin_unlock(&hb2->lock);

			/*
			 * If we would have faulted, release mmap_sem, fault
			 * it in and start all over again.
			 */
			up_read(&current->mm->mmap_sem);

			ret = get_user(curval, uaddr1);

			if (!ret)
				goto retry;

			return ret;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	list_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex(&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			/*
			 * If key1 and key2 hash to the same bucket, no need to
			 * requeue.
			 */
			if (likely(head1 != &hb2->chain)) {
				list_move_tail(&this->list, &hb2->chain);
				this->lock_ptr = &hb2->lock;
			}
			this->key = key2;
			get_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
		}
	}

out_unlock:
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);

	/* drop_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_key_refs(&key1);

out:
	up_read(&current->mm->mmap_sem);
	return ret;
}
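/*
 * Illustrative use (a sketch, not glibc code): condition-variable
 * broadcast. Rather than waking every waiter only to have them all
 * contend for the mutex, userspace can wake one waiter and requeue
 * the rest onto the mutex futex. With sys_futex() below, nr_requeue
 * travels in the 'utime' slot:
 *
 *	syscall(__NR_futex, &cond_seq, FUTEX_CMP_REQUEUE,
 *		1,				// nr_wake
 *		(void *)(unsigned long)INT_MAX,	// nr_requeue
 *		&mutex_word, cond_seq);		// uaddr2, cmpval
 *
 * The cmpval check makes the whole operation fail with -EAGAIN if the
 * condition word changed in the meantime.
 */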
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *hb;

	q->fd = fd;
	q->filp = filp;

	init_waitqueue_head(&q->waiters);

	get_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	list_add_tail(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *hb;

	hb = queue_lock(q, fd, filp);
	__queue_me(q, hb);
}

/* Return 1 if we were still queued (i.e. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	if (lock_ptr != 0) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(list_empty(&q->list));
		list_del(&q->list);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock is held on entry and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb)
{
	WARN_ON(list_empty(&q->list));
	list_del(&q->list);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(&hb->lock);

	drop_key_refs(&q->key);
}
static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
{
	struct task_struct *curr = current;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;

	q.pi_state = NULL;
retry:
	down_read(&curr->mm->mmap_sem);

	ret = get_futex_key(uaddr, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

	hb = queue_lock(&q, -1, NULL);

	/*
	 * Access the page AFTER the futex is queued.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * We hold the mmap semaphore, so the mapping cannot have changed
	 * since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		/*
		 * If we would have faulted, release mmap_sem, fault it in and
		 * start all over again.
		 */
		up_read(&curr->mm->mmap_sem);

		ret = get_user(uval, uaddr);

		if (!ret)
			goto retry;
		return ret;
	}
	ret = -EWOULDBLOCK;
	if (uval != val)
		goto out_unlock_release_sem;

	/* Only actually queue if *uaddr contained val. */
	__queue_me(&q, hb);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	up_read(&curr->mm->mmap_sem);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiters, &wait);
	/*
	 * !list_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!list_empty(&q.list)))
		time = schedule_timeout(time);
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	if (!unqueue_me(&q))
		return 0;
	if (time == 0)
		return -ETIMEDOUT;
	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	return -EINTR;

out_unlock_release_sem:
	queue_unlock(&q, hb);

out_release_sem:
	up_read(&curr->mm->mmap_sem);
	return ret;
}
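/*
 * Illustrative userspace pattern (a minimal sketch, not glibc's
 * implementation) matching the ordering comment above:
 *
 *	// waiter: sleep until var changes away from val
 *	u32 val = var;
 *	if (cond(val))
 *		syscall(__NR_futex, &var, FUTEX_WAIT, val, NULL);
 *
 *	// waker: update var, then wake one waiter
 *	var = new;
 *	syscall(__NR_futex, &var, FUTEX_WAKE, 1);
 *
 * A FUTEX_WAIT that returns 0 although var != val held on entry is
 * the "absorbed wakeup" case described above.
 */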
/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
			    struct hrtimer_sleeper *to)
{
	struct task_struct *curr = current;
	struct futex_hash_bucket *hb;
	u32 uval, newval, curval;
	struct futex_q q;
	int ret, attempt = 0;

	if (refill_pi_state_cache())
		return -ENOMEM;

	q.pi_state = NULL;
retry:
	down_read(&curr->mm->mmap_sem);

	ret = get_futex_key(uaddr, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

	hb = queue_lock(&q, -1, NULL);

retry_locked:
	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = current->pid;

	inc_preempt_count();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
	dec_preempt_count();

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;

	/* We own the lock already */
	if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
		if (!detect && 0)
			force_sig(SIGKILL, current);
		ret = -EDEADLK;
		goto out_unlock_release_sem;
	}

	/*
	 * Surprise - we got the lock. Just return
	 * to userspace:
	 */
	if (unlikely(!curval))
		goto out_unlock_release_sem;

	uval = curval;
	newval = uval | FUTEX_WAITERS;

	inc_preempt_count();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	dec_preempt_count();

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;
	if (unlikely(curval != uval))
		goto retry_locked;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, &q);

	if (unlikely(ret)) {
		/*
		 * There were no waiters and the owner task lookup
		 * failed. When the OWNER_DIED bit is set, then we
		 * know that this is a robust futex and we actually
		 * take the lock. This is safe as we are protected by
		 * the hash bucket lock. We also set the waiters bit
		 * unconditionally here, to simplify glibc handling of
		 * multiple tasks racing to acquire the lock and
		 * cleanup the problems which were left by the dead
		 * owner.
		 */
		if (curval & FUTEX_OWNER_DIED) {
			uval = newval;
			newval = current->pid |
				FUTEX_OWNER_DIED | FUTEX_WAITERS;

			inc_preempt_count();
			curval = futex_atomic_cmpxchg_inatomic(uaddr,
							       uval, newval);
			dec_preempt_count();

			if (unlikely(curval == -EFAULT))
				goto uaddr_faulted;
			if (unlikely(curval != uval))
				goto retry_locked;
			ret = 0;
		}
		goto out_unlock_release_sem;
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	up_read(&curr->mm->mmap_sem);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	down_read(&curr->mm->mmap_sem);
	spin_lock(q.lock_ptr);

	/*
	 * Got the lock. We might not be the anticipated owner if we
	 * did a lock-steal - fix up the PI-state in that case.
	 */
	if (!ret && q.pi_state->owner != curr) {
		u32 newtid = current->pid | FUTEX_WAITERS;

		/* Owner died? */
		if (q.pi_state->owner != NULL) {
			spin_lock_irq(&q.pi_state->owner->pi_lock);
			WARN_ON(list_empty(&q.pi_state->list));
			list_del_init(&q.pi_state->list);
			spin_unlock_irq(&q.pi_state->owner->pi_lock);
		} else
			newtid |= FUTEX_OWNER_DIED;

		q.pi_state->owner = current;

		spin_lock_irq(&current->pi_lock);
		WARN_ON(!list_empty(&q.pi_state->list));
		list_add(&q.pi_state->list, &current->pi_state_list);
		spin_unlock_irq(&current->pi_lock);

		/* Unqueue and drop the lock */
		unqueue_me_pi(&q, hb);
		up_read(&curr->mm->mmap_sem);
		/*
		 * We own it, so we have to replace the pending owner
		 * TID. This must be atomic as we have to preserve the
		 * owner died bit here.
		 */
		ret = get_user(uval, uaddr);
		while (!ret) {
			newval = (uval & FUTEX_OWNER_DIED) | newtid;
			curval = futex_atomic_cmpxchg_inatomic(uaddr,
							       uval, newval);
			if (curval == -EFAULT)
				ret = -EFAULT;
			if (curval == uval)
				break;
			uval = curval;
		}
	} else {
		/*
		 * Catch the rare case, where the lock was released
		 * when we were on the way back before we locked
		 * the hash bucket.
		 */
		if (ret && q.pi_state->owner == curr) {
			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
				ret = 0;
		}
		/* Unqueue and drop the lock */
		unqueue_me_pi(&q, hb);
		up_read(&curr->mm->mmap_sem);
	}

	if (!detect && ret == -EDEADLK && 0)
		force_sig(SIGKILL, current);

	return ret;

out_unlock_release_sem:
	queue_unlock(&q, hb);

out_release_sem:
	up_read(&curr->mm->mmap_sem);
	return ret;

uaddr_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, but we can't modify it
	 * non-atomically.  Therefore, if get_user below is not
	 * enough, we need to handle the fault ourselves, while
	 * still holding the mmap_sem.
	 */
	if (attempt++) {
		if (futex_handle_fault((unsigned long)uaddr, attempt))
			goto out_unlock_release_sem;
		goto retry_locked;
	}

	queue_unlock(&q, hb);
	up_read(&curr->mm->mmap_sem);

	ret = get_user(uval, uaddr);
	if (!ret)
		goto retry;

	return ret;
}
/*
 * Restart handler
 */
static long futex_lock_pi_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	int ret;

	restart->fn = do_no_restart_syscall;

	if (restart->arg2 || restart->arg3) {
		to = &timeout;
		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
		hrtimer_init_sleeper(to, current);
		/* arg2/arg3 hold the low/high halves of the expiry time: */
		to->timer.expires.tv64 = ((u64)restart->arg3 << 32) |
			(u64) restart->arg2;
	}

	pr_debug("lock_pi restart: %p, %d\n",
		 (u32 __user *)restart->arg0, current->pid);

	ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
			       0, to);

	if (ret != -EINTR)
		return ret;

	restart->fn = futex_lock_pi_restart;

	/* The other values are filled in */
	return -ERESTART_RESTARTBLOCK;
}

/*
 * Called from the syscall entry below.
 */
static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
			 long nsec, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	int ret;

	if (sec != MAX_SCHEDULE_TIMEOUT) {
		to = &timeout;
		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
		hrtimer_init_sleeper(to, current);
		to->timer.expires = ktime_set(sec, nsec);
	}

	ret = do_futex_lock_pi(uaddr, detect, trylock, to);

	if (ret != -EINTR)
		return ret;

	pr_debug("lock_pi interrupted: %p, %d\n", uaddr, current->pid);

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_lock_pi_restart;
	restart->arg0 = (unsigned long) uaddr;
	restart->arg1 = detect;
	if (to) {
		restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
		restart->arg3 = to->timer.expires.tv64 >> 32;
	} else
		restart->arg2 = restart->arg3 = 0;

	return -ERESTART_RESTARTBLOCK;
}
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct list_head *head;
	union futex_key key;
	int ret, attempt = 0;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != current->pid)
		return -EPERM;
	/*
	 * First take all the futex related locks:
	 */
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

retry_locked:
	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	inc_preempt_count();
	uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
	dec_preempt_count();

	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == current->pid))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	ret = unlock_futex_pi(uaddr, uval);
	if (ret == -EFAULT)
		goto pi_faulted;

out_unlock:
	spin_unlock(&hb->lock);
out:
	up_read(&current->mm->mmap_sem);

	return ret;

pi_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, but we can't modify it
	 * non-atomically.  Therefore, if get_user below is not
	 * enough, we need to handle the fault ourselves, while
	 * still holding the mmap_sem.
	 */
	if (attempt++) {
		if (futex_handle_fault((unsigned long)uaddr, attempt))
			goto out_unlock;
		goto retry_locked;
	}

	spin_unlock(&hb->lock);
	up_read(&current->mm->mmap_sem);

	ret = get_user(uval, uaddr);
	if (!ret)
		goto retry;

	return ret;
}
static int futex_close(struct inode *inode, struct file *filp)
{
	struct futex_q *q = filp->private_data;

	unqueue_me(q);
	kfree(q);

	return 0;
}

/* This is one-shot: once it's gone off you need a new fd */
static unsigned int futex_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct futex_q *q = filp->private_data;
	int ret = 0;

	poll_wait(filp, &q->waiters, wait);

	/*
	 * list_empty() is safe here without any lock.
	 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (list_empty(&q->list))
		ret = POLLIN | POLLRDNORM;

	return ret;
}

static struct file_operations futex_fops = {
	.release	= futex_close,
	.poll		= futex_poll,
};

/*
 * Signal allows caller to avoid the race which would occur if they
 * set the sigio stuff up afterwards.
 */
static int futex_fd(u32 __user *uaddr, int signal)
{
	struct futex_q *q;
	struct file *filp;
	int ret, err;

	ret = -EINVAL;
	if (!valid_signal(signal))
		goto out;

	ret = get_unused_fd();
	if (ret < 0)
		goto out;
	filp = get_empty_filp();
	if (!filp) {
		put_unused_fd(ret);
		ret = -ENFILE;
		goto out;
	}
	filp->f_op = &futex_fops;
	filp->f_vfsmnt = mntget(futex_mnt);
	filp->f_dentry = dget(futex_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;

	if (signal) {
		err = f_setown(filp, current->pid, 1);
		if (err < 0) {
			goto error;
		}
		filp->f_owner.signum = signal;
	}

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		err = -ENOMEM;
		goto error;
	}
	q->pi_state = NULL;

	down_read(&current->mm->mmap_sem);
	err = get_futex_key(uaddr, &q->key);

	if (unlikely(err != 0)) {
		up_read(&current->mm->mmap_sem);
		kfree(q);
		goto error;
	}

	/*
	 * queue_me() must be called before releasing mmap_sem, because
	 * key->shared.inode needs to be referenced while holding it.
	 */
	filp->private_data = q;

	queue_me(q, ret, filp);
	up_read(&current->mm->mmap_sem);

	/* Now we map fd to filp, so userspace can access it */
	fd_install(ret, filp);
out:
	return ret;
error:
	put_unused_fd(ret);
	put_filp(filp);
	ret = err;
	goto out;
}
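/*
 * Illustrative use of FUTEX_FD (a sketch): the returned fd becomes
 * readable once the futex is woken, so a futex can be mixed into a
 * poll() loop alongside ordinary file descriptors:
 *
 *	int fd = syscall(__NR_futex, &var, FUTEX_FD, signum, NULL);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns when futex_wake() hits &var
 *	close(fd);		// one-shot: open a new fd to wait again
 *
 * A non-zero 'signum' additionally requests SIGIO-style delivery of
 * that signal, as set up via f_setown() above.
 */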
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
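/*
 * Illustrative userspace setup (a sketch of the contract, not glibc
 * code): each thread registers one list head, once:
 *
 *	struct robust_list_head head = {
 *		.list		 = { &head.list },	// empty circular list
 *		.futex_offset	 = offsetof(struct my_lock, futex_word),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(__NR_set_robust_list, &head, sizeof(head));
 *
 * Before acquiring a lock the thread stores that lock's list entry in
 * list_op_pending, acquires the futex, links the entry into the list,
 * and only then clears list_op_pending again.
 *
 * 'struct my_lock' and 'futex_word' are stand-ins for whatever lock
 * layout userspace chooses.
 */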
/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
		    size_t len)
{
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
		    size_t __user *len_ptr)
{
	struct robust_list_head *head;
	unsigned long ret;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		if ((current->euid != p->euid) && (current->euid != p->uid) &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		read_unlock(&tasklist_lock);
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
{
	u32 uval, nval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == curr->pid) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
						     uval | FUTEX_OWNER_DIED);
		if (nval == -EFAULT)
			return -1;

		if (nval != uval)
			goto retry;

		if (uval & FUTEX_WAITERS)
			futex_wake(uaddr, 1);
	}
	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT;
	unsigned long futex_offset;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (get_user(entry, &head->list.next))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (get_user(pending, &head->list_op_pending))
		return;
	if (pending)
		handle_futex_death((void *)pending + futex_offset, curr);

	while (entry != &head->list) {
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void *)entry + futex_offset,
					       curr))
				return;
		/*
		 * Fetch the next entry in the list:
		 */
		if (get_user(entry, &entry->next))
			return;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
}
long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int ret;

	switch (op) {
	case FUTEX_WAIT:
		ret = futex_wait(uaddr, val, timeout);
		break;
	case FUTEX_WAKE:
		ret = futex_wake(uaddr, val);
		break;
	case FUTEX_FD:
		/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
		ret = futex_fd(uaddr, val);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		ret = futex_lock_pi(uaddr, val, timeout, val2, 0);
		break;
	case FUTEX_UNLOCK_PI:
		ret = futex_unlock_pi(uaddr);
		break;
	case FUTEX_TRYLOCK_PI:
		ret = futex_lock_pi(uaddr, 0, timeout, val2, 1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
			  struct timespec __user *utime, u32 __user *uaddr2,
			  u32 val3)
{
	struct timespec t;
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	u32 val2 = 0;

	if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
		if (copy_from_user(&t, utime, sizeof(t)) != 0)
			return -EFAULT;
		if (!timespec_valid(&t))
			return -EINVAL;
		if (op == FUTEX_WAIT)
			timeout = timespec_to_jiffies(&t) + 1;
		else {
			timeout = t.tv_sec;
			val2 = t.tv_nsec;
		}
	}
	/*
	 * The requeue parameter travels in 'utime' if op is
	 * FUTEX_REQUEUE or FUTEX_CMP_REQUEUE.
	 */
	if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}

static int futexfs_get_sb(struct file_system_type *fs_type,
			  int flags, const char *dev_name, void *data,
			  struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
}

static struct file_system_type futex_fs_type = {
	.name		= "futexfs",
	.get_sb		= futexfs_get_sb,
	.kill_sb	= kill_anon_super,
};

static int __init init(void)
{
	unsigned int i;

	register_filesystem(&futex_fs_type);
	futex_mnt = kern_mount(&futex_fs_type);

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		INIT_LIST_HEAD(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}
	return 0;
}
__initcall(init);