@@ -87,24 +87,24 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 
 	/*
-	 * Synchronize against page_lock_anon_vma() such that
+	 * Synchronize against page_lock_anon_vma_read() such that
 	 * we can safely hold the lock without the anon_vma getting
 	 * freed.
 	 *
 	 * Relies on the full mb implied by the atomic_dec_and_test() from
 	 * put_anon_vma() against the acquire barrier implied by
-	 * mutex_trylock() from page_lock_anon_vma(). This orders:
+	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
 	 *
-	 * page_lock_anon_vma()		VS	put_anon_vma()
-	 *   mutex_trylock()			  atomic_dec_and_test()
+	 * page_lock_anon_vma_read()	VS	put_anon_vma()
+	 *   down_read_trylock()		  atomic_dec_and_test()
 	 *   LOCK				  MB
-	 *   atomic_read()			  mutex_is_locked()
+	 *   atomic_read()			  rwsem_is_locked()
 	 *
 	 * LOCK should suffice since the actual taking of the lock must
 	 * happen _before_ what follows.
 	 */
 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		anon_vma_unlock(anon_vma);
 	}
 
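
For reference only (not part of the patch): a minimal, hedged sketch of the two
paths the comment above orders against each other. The example_* names are
invented for illustration; down_read_trylock(), rwsem_is_locked() and
atomic_dec_and_test() are the real primitives the hunk relies on.

	#include <linux/atomic.h>
	#include <linux/rwsem.h>
	#include <linux/types.h>

	struct example_obj {
		atomic_t refcount;
		struct rw_semaphore rwsem;
	};

	/* Reader path, as in page_lock_anon_vma_read(): the trylock is an
	 * acquire, so the refcount read below happens after LOCK. */
	static bool example_lock(struct example_obj *obj)
	{
		if (!down_read_trylock(&obj->rwsem))
			return false;
		if (!atomic_read(&obj->refcount)) {	/* object already dying */
			up_read(&obj->rwsem);
			return false;
		}
		return true;
	}

	/* Free path, as in put_anon_vma()/anon_vma_free(): the decrement is a
	 * full barrier, so rwsem_is_locked() cannot be hoisted above it. */
	static void example_put(struct example_obj *obj)
	{
		if (atomic_dec_and_test(&obj->refcount)) {
			if (rwsem_is_locked(&obj->rwsem)) {
				down_write(&obj->rwsem);	/* wait out the holder */
				up_write(&obj->rwsem);
			}
			/* now safe to free obj */
		}
	}
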
@@ -146,7 +146,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
  * allocate a new one.
  *
  * Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma()
+ * optimistically looked up an anon_vma in page_lock_anon_vma_read()
  * and that may actually touch the spinlock even in the newly
  * allocated vma (it depends on RCU to make sure that the
  * anon_vma isn't actually destroyed).
@@ -181,7 +181,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			allocated = anon_vma;
 		}
 
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
@@ -306,7 +306,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	get_anon_vma(anon_vma->root);
 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 	vma->anon_vma = anon_vma;
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_write(anon_vma);
 	anon_vma_chain_link(vma, avc, anon_vma);
 	anon_vma_unlock(anon_vma);
 
@@ -442,7 +442,7 @@ out:
  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
  * reference like with page_get_anon_vma() and then block on the mutex.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
 {
 	struct anon_vma *anon_vma = NULL;
 	struct anon_vma *root_anon_vma;
@@ -457,14 +457,14 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	root_anon_vma = ACCESS_ONCE(anon_vma->root);
-	if (down_write_trylock(&root_anon_vma->rwsem)) {
+	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
 		 * If the page is still mapped, then this anon_vma is still
 		 * its anon_vma, and holding the mutex ensures that it will
 		 * not go away, see anon_vma_free().
 		 */
 		if (!page_mapped(page)) {
-			up_write(&root_anon_vma->rwsem);
+			up_read(&root_anon_vma->rwsem);
 			anon_vma = NULL;
 		}
 		goto out;
@@ -484,15 +484,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
 	/* we pinned the anon_vma, its safe to sleep */
 	rcu_read_unlock();
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_read(anon_vma);
 
 	if (atomic_dec_and_test(&anon_vma->refcount)) {
 		/*
 		 * Oops, we held the last refcount, release the lock
 		 * and bail -- can't simply use put_anon_vma() because
-		 * we'll deadlock on the anon_vma_lock() recursion.
+		 * we'll deadlock on the anon_vma_lock_write() recursion.
 		 */
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 		__put_anon_vma(anon_vma);
 		anon_vma = NULL;
 	}
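
A hedged summary of the pattern in the two hunks above (illustrative only; the
example_* identifiers are made up, and the real function also rechecks
page_mapped()): try the read lock while still inside the RCU section, and only
fall back to pinning the anon_vma with a reference and blocking if the trylock
fails.

	#include <linux/atomic.h>
	#include <linux/rcupdate.h>
	#include <linux/rwsem.h>

	struct example_obj {
		atomic_t refcount;
		struct rw_semaphore rwsem;
	};

	/* Returns the object with its rwsem held for read, or NULL. */
	static struct example_obj *example_lock_read(struct example_obj *obj)
	{
		rcu_read_lock();
		if (down_read_trylock(&obj->rwsem)) {
			/* fast path: no need to sleep inside the RCU section */
			rcu_read_unlock();
			return obj;
		}

		/* slow path: pin the object so it cannot go away, then sleep */
		if (!atomic_inc_not_zero(&obj->refcount)) {
			rcu_read_unlock();
			return NULL;
		}
		rcu_read_unlock();

		down_read(&obj->rwsem);
		if (atomic_dec_and_test(&obj->refcount)) {
			/* we held the last reference: unlock and bail, like the
			 * __put_anon_vma() case above (a real caller would free
			 * the object here) */
			up_read(&obj->rwsem);
			return NULL;
		}
		return obj;
	}
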
@@ -504,9 +504,9 @@ out:
 	return anon_vma;
 }
 
-void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 {
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_read(anon_vma);
 }
 
 /*
@@ -732,7 +732,7 @@ static int page_referenced_anon(struct page *page,
 	struct anon_vma_chain *avc;
 	int referenced = 0;
 
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_lock_anon_vma_read(page);
 	if (!anon_vma)
 		return referenced;
 
@@ -754,7 +754,7 @@ static int page_referenced_anon(struct page *page,
 			break;
 	}
 
-	page_unlock_anon_vma(anon_vma);
+	page_unlock_anon_vma_read(anon_vma);
 	return referenced;
 }
 
@@ -1474,7 +1474,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_lock_anon_vma_read(page);
 	if (!anon_vma)
 		return ret;
 
@@ -1501,7 +1501,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			break;
 	}
 
-	page_unlock_anon_vma(anon_vma);
+	page_unlock_anon_vma_read(anon_vma);
 	return ret;
 }
 
@@ -1696,7 +1696,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	int ret = SWAP_AGAIN;
 
 	/*
-	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
 	 * because that depends on page_mapped(); but not all its usages
 	 * are holding mmap_sem. Users without mmap_sem are required to
 	 * take a reference count to prevent the anon_vma disappearing
@@ -1704,7 +1704,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma = page_anon_vma(page);
 	if (!anon_vma)
 		return ret;
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_read(anon_vma);
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
@@ -1712,7 +1712,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 		if (ret != SWAP_AGAIN)
 			break;
 	}
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_read(anon_vma);
 	return ret;
 }
 
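
Usage note, sketched under the assumption described in the rmap_walk_anon()
comment above: a walker that cannot rely on page_mapped() (and therefore cannot
use page_lock_anon_vma_read()) is expected to pin the anon_vma with a reference
before taking the lock. example_walk_pinned() is a made-up name; the lock
helpers are the ones this patch switches the walkers to.

	#include <linux/rmap.h>

	static void example_walk_pinned(struct anon_vma *anon_vma)
	{
		/* caller took a reference earlier, e.g. via page_get_anon_vma() */
		anon_vma_lock_read(anon_vma);	/* readers can now walk in parallel */
		/* ... walk anon_vma->rb_root as rmap_walk_anon() does ... */
		anon_vma_unlock_read(anon_vma);
		put_anon_vma(anon_vma);		/* drop the pin */
	}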