@@ -122,24 +122,6 @@ struct futex_hash_bucket {
 
 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 
-/*
- * Take mm->mmap_sem, when futex is shared
- */
-static inline void futex_lock_mm(struct rw_semaphore *fshared)
-{
-	if (fshared)
-		down_read(fshared);
-}
-
-/*
- * Release mm->mmap_sem, when the futex is shared
- */
-static inline void futex_unlock_mm(struct rw_semaphore *fshared)
-{
-	if (fshared)
-		up_read(fshared);
-}
-
 /*
  * We hash on the keys returned from get_futex_key (see below).
  */
@@ -161,6 +143,45 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
 		&& key1->both.offset == key2->both.offset);
 }
 
+/*
+ * Take a reference to the resource addressed by a key.
+ * Can be called while holding spinlocks.
+ *
+ */
+static void get_futex_key_refs(union futex_key *key)
+{
+	if (!key->both.ptr)
+		return;
+
+	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+	case FUT_OFF_INODE:
+		atomic_inc(&key->shared.inode->i_count);
+		break;
+	case FUT_OFF_MMSHARED:
+		atomic_inc(&key->private.mm->mm_count);
+		break;
+	}
+}
+
+/*
+ * Drop a reference to the resource addressed by a key.
+ * The hash bucket spinlock must not be held.
+ */
+static void drop_futex_key_refs(union futex_key *key)
+{
+	if (!key->both.ptr)
+		return;
+
+	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+	case FUT_OFF_INODE:
+		iput(key->shared.inode);
+		break;
+	case FUT_OFF_MMSHARED:
+		mmdrop(key->private.mm);
+		break;
+	}
+}
+
 /**
  * get_futex_key - Get parameters which are the keys for a futex.
  * @uaddr: virtual address of the futex
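For reference, the two helpers above discriminate on the low bits of ->both.offset: FUT_OFF_INODE (1) marks a key that holds an inode reference, FUT_OFF_MMSHARED (2) one that holds an mm reference. The union they operate on looks roughly like this in include/linux/futex.h at this point in the series (quoted from memory for context, not part of this diff):

	union futex_key {
		struct {
			unsigned long pgoff;	/* page->index of the backing page */
			struct inode *inode;	/* pinned via i_count while the key lives */
			int offset;		/* in-page offset, FUT_OFF_INODE set */
		} shared;
		struct {
			unsigned long address;	/* user virtual address */
			struct mm_struct *mm;	/* pinned via mm_count while the key lives */
			int offset;		/* in-page offset, FUT_OFF_MMSHARED set */
		} private;
		struct {
			unsigned long word;
			void *ptr;		/* NULL means no reference was taken */
			int offset;
		} both;
	};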
@@ -179,12 +200,10 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-			 union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
 	struct page *page;
 	int err;
 
@@ -208,100 +227,50 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
+		get_futex_key_refs(key);
 		return 0;
 	}
-	/*
-	 * The futex is hashed differently depending on whether
-	 * it's in a shared or private mapping.  So check vma first.
-	 */
-	vma = find_extend_vma(mm, address);
-	if (unlikely(!vma))
-		return -EFAULT;
 
-	/*
-	 * Permissions.
-	 */
-	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
-		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
+again:
+	err = get_user_pages_fast(address, 1, 0, &page);
+	if (err < 0)
+		return err;
+
+	lock_page(page);
+	if (!page->mapping) {
+		unlock_page(page);
+		put_page(page);
+		goto again;
+	}
 
 	/*
 	 * Private mappings are handled in a simple way.
 	 *
 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 	 * it's a read-only handle, it's expected that futexes attach to
-	 * the object not the particular process.  Therefore we use
-	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-	 * mappings of _writable_ handles.
+	 * the object not the particular process.
 	 */
-	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-		key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
+	if (PageAnon(page)) {
+		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 		key->private.mm = mm;
 		key->private.address = address;
-		return 0;
+	} else {
+		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+		key->shared.inode = page->mapping->host;
+		key->shared.pgoff = page->index;
 	}
 
-	/*
-	 * Linear file mappings are also simple.
-	 */
-	key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
-	key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
-	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
-		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
-				     + vma->vm_pgoff);
-		return 0;
-	}
+	get_futex_key_refs(key);
 
-	/*
-	 * We could walk the page table to read the non-linear
-	 * pte, and get the page index without fetching the page
-	 * from swap.  But that's a lot of code to duplicate here
-	 * for a rare case, so we simply fetch the page.
-	 */
-	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
-	if (err >= 0) {
-		key->shared.pgoff =
-			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		put_page(page);
-		return 0;
-	}
-	return err;
-}
-
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-static void get_futex_key_refs(union futex_key *key)
-{
-	if (key->both.ptr == NULL)
-		return;
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		atomic_inc(&key->shared.inode->i_count);
-		break;
-	case FUT_OFF_MMSHARED:
-		atomic_inc(&key->private.mm->mm_count);
-		break;
-	}
+	unlock_page(page);
+	put_page(page);
+	return 0;
 }
 
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
- */
-static void drop_futex_key_refs(union futex_key *key)
+static inline
+void put_futex_key(int fshared, union futex_key *key)
 {
-	if (!key->both.ptr)
-		return;
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		iput(key->shared.inode);
-		break;
-	case FUT_OFF_MMSHARED:
-		mmdrop(key->private.mm);
-		break;
-	}
+	drop_futex_key_refs(key);
 }
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
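The rework above is the heart of the patch: instead of walking the VMA under mmap_sem, get_futex_key() pins the backing page with get_user_pages_fast() and keys the futex by (mm, address) for anonymous pages, or by (inode, page->index) for file- and shmem-backed pages; the lock_page()/!page->mapping retry catches pages whose mapping is being torn down (truncate, migration) between the gup and the key computation. A minimal runnable userspace sketch of the shared case — the path that now takes the inode-based key — using only the standard futex(2) interface (illustrative, not part of the patch):

	/* shared_futex_demo.c — illustrative only, not part of this patch.
	 * A MAP_SHARED|MAP_ANONYMOUS mapping is shmem-backed, so with this
	 * patch the kernel keys the futex by (inode, page->index, offset)
	 * and the wait/wake pair matches across fork().
	 * Build: gcc -o shared_futex_demo shared_futex_demo.c
	 */
	#include <linux/futex.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static long futex(volatile int *uaddr, int op, int val)
	{
		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
	}

	int main(void)
	{
		volatile int *f = mmap(NULL, sizeof(*f), PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

		*f = 0;
		if (fork() == 0) {
			while (*f == 0)		/* FUTEX_WAIT returns EAGAIN if *f != 0 */
				futex(f, FUTEX_WAIT, 0);
			_exit(0);
		}
		sleep(1);
		*f = 1;				/* publish, then wake one waiter */
		futex(f, FUTEX_WAKE, 1);
		wait(NULL);
		puts("child woken through the shared mapping");
		return 0;
	}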
@@ -328,10 +297,8 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
 
 /*
  * Fault handling.
- * if fshared is non NULL, current->mm->mmap_sem is already held
  */
-static int futex_handle_fault(unsigned long address,
-			      struct rw_semaphore *fshared, int attempt)
+static int futex_handle_fault(unsigned long address, int attempt)
 {
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
@@ -340,8 +307,7 @@ static int futex_handle_fault(unsigned long address,
 	if (attempt > 2)
 		return ret;
 
-	if (!fshared)
-		down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (vma && address >= vma->vm_start &&
 	    (vma->vm_flags & VM_WRITE)) {
@@ -361,8 +327,7 @@ static int futex_handle_fault(unsigned long address,
 			current->min_flt++;
 		}
 	}
-	if (!fshared)
-		up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	return ret;
 }
 
@@ -385,6 +350,7 @@ static int refill_pi_state_cache(void)
 	/* pi_mutex gets initialized later */
 	pi_state->owner = NULL;
 	atomic_set(&pi_state->refcount, 1);
+	pi_state->key = FUTEX_KEY_INIT;
 
 	current->pi_state_cache = pi_state;
 
@@ -462,7 +428,7 @@ void exit_pi_state_list(struct task_struct *curr)
 	struct list_head *next, *head = &curr->pi_state_list;
 	struct futex_pi_state *pi_state;
 	struct futex_hash_bucket *hb;
-	union futex_key key;
+	union futex_key key = FUTEX_KEY_INIT;
 
 	if (!futex_cmpxchg_enabled)
 		return;
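FUTEX_KEY_INIT comes from elsewhere in this series (include/linux/futex.h); the point of initializing keys with it is that ->both.ptr starts out NULL, so get_futex_key_refs()/drop_futex_key_refs() on a key that was never filled in are no-ops and error paths can drop keys unconditionally. Its presumed definition, quoted from memory for context (verify against the tree):

	/* include/linux/futex.h — as added by this series */
	#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }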
@@ -719,20 +685,17 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
  */
-static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
-		      int nr_wake, u32 bitset)
+static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 {
 	struct futex_hash_bucket *hb;
 	struct futex_q *this, *next;
 	struct plist_head *head;
-	union futex_key key;
+	union futex_key key = FUTEX_KEY_INIT;
 	int ret;
 
 	if (!bitset)
 		return -EINVAL;
 
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
@@ -760,7 +723,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	spin_unlock(&hb->lock);
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key);
 	return ret;
 }
 
@@ -769,19 +732,16 @@ out:
  * to this virtual address:
 */
 static int
-futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
-	      u32 __user *uaddr2,
+futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	      int nr_wake, int nr_wake2, int op)
 {
-	union futex_key key1, key2;
+	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head;
 	struct futex_q *this, *next;
 	int ret, op_ret, attempt = 0;
 
 retryfull:
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
@@ -826,18 +786,12 @@ retry:
 		 */
 		if (attempt++) {
 			ret = futex_handle_fault((unsigned long)uaddr2,
-						 fshared, attempt);
+						 attempt);
 			if (ret)
 				goto out;
 			goto retry;
 		}
 
-		/*
-		 * If we would have faulted, release mmap_sem,
-		 * fault it in and start all over again.
-		 */
-		futex_unlock_mm(fshared);
-
 		ret = get_user(dummy, uaddr2);
 		if (ret)
 			return ret;
@@ -873,7 +827,8 @@ retry:
 	if (hb1 != hb2)
 		spin_unlock(&hb2->lock);
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key2);
+	put_futex_key(fshared, &key1);
 
 	return ret;
 }
@@ -882,19 +837,16 @@ out:
  * Requeue all waiters hashed on one physical page to another
  * physical page.
  */
-static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
-			 u32 __user *uaddr2,
+static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 			 int nr_wake, int nr_requeue, u32 *cmpval)
 {
-	union futex_key key1, key2;
+	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head1;
 	struct futex_q *this, *next;
 	int ret, drop_count = 0;
 
 retry:
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
@@ -917,12 +869,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
 		if (hb1 != hb2)
 			spin_unlock(&hb2->lock);
 
-		/*
-		 * If we would have faulted, release mmap_sem, fault
-		 * it in and start all over again.
-		 */
-		futex_unlock_mm(fshared);
-
 		ret = get_user(curval, uaddr1);
 
 		if (!ret)
@@ -974,7 +920,8 @@ out_unlock:
 	drop_futex_key_refs(&key1);
 
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key2);
+	put_futex_key(fshared, &key1);
 	return ret;
 }
 
@@ -1096,8 +1043,7 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
 */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner,
-				struct rw_semaphore *fshared)
+				struct task_struct *newowner, int fshared)
 {
 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
@@ -1176,7 +1122,7 @@ retry:
 handle_fault:
 	spin_unlock(q->lock_ptr);
 
-	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
+	ret = futex_handle_fault((unsigned long)uaddr, attempt++);
 
 	spin_lock(q->lock_ptr);
 
@@ -1200,7 +1146,7 @@ handle_fault:
 
 static long futex_wait_restart(struct restart_block *restart);
 
-static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
+static int futex_wait(u32 __user *uaddr, int fshared,
 		      u32 val, ktime_t *abs_time, u32 bitset)
 {
 	struct task_struct *curr = current;
@@ -1218,8 +1164,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	q.pi_state = NULL;
 	q.bitset = bitset;
 retry:
-	futex_lock_mm(fshared);
-
+	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
 		goto out_release_sem;
@@ -1251,12 +1196,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	if (unlikely(ret)) {
 		queue_unlock(&q, hb);
 
-		/*
-		 * If we would have faulted, release mmap_sem, fault it in and
-		 * start all over again.
-		 */
-		futex_unlock_mm(fshared);
-
 		ret = get_user(uval, uaddr);
 
 		if (!ret)
@@ -1270,12 +1209,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	/* Only actually queue if *uaddr contained val. */
 	queue_me(&q, hb);
 
-	/*
-	 * Now the futex is queued and we have checked the data, we
-	 * don't want to hold mmap_sem while we sleep.
-	 */
-	futex_unlock_mm(fshared);
-
 	/*
 	 * There might have been scheduling since the queue_me(), as we
 	 * cannot hold a spinlock across the get_user() in case it
@@ -1363,7 +1296,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
 out_release_sem:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &q.key);
 	return ret;
 }
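With the mmap_sem choreography gone, futex_wait()'s contract is carried entirely by the value check done under the hash-bucket lock: the task is queued only if *uaddr still equals val. A runnable sketch of that contract from userspace, relying only on documented futex(2) behavior (illustrative, not part of the patch):

	/* waitval_demo.c — illustrative only, not part of this patch.
	 * FUTEX_WAIT sleeps only if the futex word still holds the
	 * expected value; otherwise the kernel returns EAGAIN and the
	 * waiter is never queued.
	 */
	#include <errno.h>
	#include <linux/futex.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int word = 1;

		/* we claim to expect 0, but the word is 1: immediate EAGAIN */
		if (syscall(SYS_futex, &word, FUTEX_WAIT, 0, NULL, NULL, 0) == -1 &&
		    errno == EAGAIN)
			puts("value mismatch: kernel refused to queue the waiter");
		return 0;
	}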
@@ -1371,13 +1304,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 static long futex_wait_restart(struct restart_block *restart)
 {
 	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
-	struct rw_semaphore *fshared = NULL;
+	int fshared = 0;
 	ktime_t t;
 
 	t.tv64 = restart->futex.time;
 	restart->fn = do_no_restart_syscall;
 	if (restart->futex.flags & FLAGS_SHARED)
-		fshared = &current->mm->mmap_sem;
+		fshared = 1;
 	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
 				restart->futex.bitset);
 }
@@ -1389,7 +1322,7 @@ static long futex_wait_restart(struct restart_block *restart)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
+static int futex_lock_pi(u32 __user *uaddr, int fshared,
 			 int detect, ktime_t *time, int trylock)
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
@@ -1412,8 +1345,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	q.pi_state = NULL;
 retry:
-	futex_lock_mm(fshared);
-
+	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
 		goto out_release_sem;
@@ -1502,7 +1434,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 			 * exit to complete.
 			 */
 			queue_unlock(&q, hb);
-			futex_unlock_mm(fshared);
 			cond_resched();
 			goto retry;
 
@@ -1534,12 +1465,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 */
 	queue_me(&q, hb);
 
-	/*
-	 * Now the futex is queued and we have checked the data, we
-	 * don't want to hold mmap_sem while we sleep.
-	 */
-	futex_unlock_mm(fshared);
-
 	WARN_ON(!q.pi_state);
 	/*
 	 * Block on the PI mutex:
@@ -1552,7 +1477,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		ret = ret ? 0 : -EWOULDBLOCK;
 	}
 
-	futex_lock_mm(fshared);
 	spin_lock(q.lock_ptr);
 
 	if (!ret) {
@@ -1618,7 +1542,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
-	futex_unlock_mm(fshared);
 
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
@@ -1628,7 +1551,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
 out_release_sem:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &q.key);
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
 	return ret;
@@ -1645,15 +1568,12 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
 	if (attempt++) {
-		ret = futex_handle_fault((unsigned long)uaddr, fshared,
-					 attempt);
+		ret = futex_handle_fault((unsigned long)uaddr, attempt);
 		if (ret)
 			goto out_release_sem;
 		goto retry_unlocked;
 	}
 
-	futex_unlock_mm(fshared);
-
 	ret = get_user(uval, uaddr);
 	if (!ret && (uval != -EFAULT))
 		goto retry;
@@ -1668,13 +1588,13 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
  * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
-static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
+static int futex_unlock_pi(u32 __user *uaddr, int fshared)
 {
 	struct futex_hash_bucket *hb;
 	struct futex_q *this, *next;
 	u32 uval;
 	struct plist_head *head;
-	union futex_key key;
+	union futex_key key = FUTEX_KEY_INIT;
 	int ret, attempt = 0;
 
 retry:
@@ -1685,10 +1605,6 @@ retry:
 	 */
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
-	/*
-	 * First take all the futex related locks:
-	 */
-	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
@@ -1747,7 +1663,7 @@ retry_unlocked:
 out_unlock:
 	spin_unlock(&hb->lock);
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key);
 
 	return ret;
 
@@ -1763,16 +1679,13 @@ pi_faulted:
 	spin_unlock(&hb->lock);
 
 	if (attempt++) {
-		ret = futex_handle_fault((unsigned long)uaddr, fshared,
-					 attempt);
+		ret = futex_handle_fault((unsigned long)uaddr, attempt);
 		if (ret)
 			goto out;
 		uval = 0;
 		goto retry_unlocked;
 	}
 
-	futex_unlock_mm(fshared);
-
 	ret = get_user(uval, uaddr);
 	if (!ret && (uval != -EFAULT))
 		goto retry;
@@ -1898,8 +1811,7 @@ retry:
 		 * PI futexes happens in exit_pi_state():
 		 */
 		if (!pi && (uval & FUTEX_WAITERS))
-			futex_wake(uaddr, &curr->mm->mmap_sem, 1,
-				   FUTEX_BITSET_MATCH_ANY);
+			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
 	}
 	return 0;
 }
@@ -1995,10 +1907,10 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 {
 	int ret = -ENOSYS;
 	int cmd = op & FUTEX_CMD_MASK;
-	struct rw_semaphore *fshared = NULL;
+	int fshared = 0;
 
 	if (!(op & FUTEX_PRIVATE_FLAG))
-		fshared = &current->mm->mmap_sem;
+		fshared = 1;
 
 	switch (cmd) {
 	case FUTEX_WAIT:
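do_futex() now reduces the shared/private distinction to a plain int: fshared stays 0 for the *_PRIVATE opcodes, letting get_futex_key() take the access_ok() fast path with no page pinning at all. A runnable sketch of that private fast path from userspace (illustrative, not part of the patch):

	/* private_futex_demo.c — illustrative only, not part of this patch.
	 * FUTEX_WAIT_PRIVATE/FUTEX_WAKE_PRIVATE keep fshared == 0 in
	 * do_futex(), so get_futex_key() never looks up the backing page.
	 * Build: gcc -pthread -o private_futex_demo private_futex_demo.c
	 */
	#include <linux/futex.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int word;

	static void *waiter(void *unused)
	{
		while (__sync_fetch_and_add(&word, 0) == 0)
			syscall(SYS_futex, &word, FUTEX_WAIT_PRIVATE, 0,
				NULL, NULL, 0);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		sleep(1);
		__sync_lock_test_and_set(&word, 1);
		syscall(SYS_futex, &word, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
		pthread_join(t, NULL);
		puts("private futex handshake complete");
		return 0;
	}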