@@ -788,6 +788,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
+		if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
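
The new bail-out works because TTU_ACTION() extracts just the low action byte from the combined ttu_flags value, so modifier bits such as TTU_IGNORE_ACCESS never change which action is selected. Below is a minimal, self-contained sketch of that flag scheme; the constant values are paraphrased from memory of include/linux/rmap.h of this era and should be treated as an assumption, not a quotation.

/* Standalone model of the ttu_flags scheme: the action lives in the
 * low byte, modifier bits in the high bits.  Constant values here are
 * an assumption, paraphrased from include/linux/rmap.h. */
#include <assert.h>

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* modifier: skip VM_LOCKED check */
	TTU_IGNORE_ACCESS = (1 << 9),	/* modifier: skip young-bit check */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int main(void)
{
	/* A modifier bit must not change the selected action. */
	assert(TTU_ACTION(TTU_MUNLOCK | TTU_IGNORE_ACCESS) == TTU_MUNLOCK);
	return 0;
}
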
@@ -853,12 +855,22 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	} else
 		dec_mm_counter(mm, file_rss);
 
-
 	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+
+	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+		ret = SWAP_AGAIN;
+		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+			if (vma->vm_flags & VM_LOCKED) {
+				mlock_vma_page(page);
+				ret = SWAP_MLOCK;
+			}
+			up_read(&vma->vm_mm->mmap_sem);
+		}
+	}
 out:
 	return ret;
 }
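
This added tail is the core of the change: the old try_to_mlock_page() helper (deleted in the next hunk) is folded directly into try_to_unmap_one(). If mmap_sem cannot be taken without blocking, the function now reports SWAP_AGAIN so the caller can simply try the page again later; SWAP_MLOCK is returned only once VM_LOCKED has been confirmed under the semaphore and mlock_vma_page() has run. Here is a compilable userspace model of that trylock-or-defer decision, with a pthread rwlock standing in for mmap_sem (all names below are illustrative, not the kernel's):

/* Userspace model of the trylock-or-defer pattern above.
 * Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdio.h>

enum { SWAP_AGAIN, SWAP_MLOCK };

struct fake_vma {
	pthread_rwlock_t mmap_sem;	/* stands in for mm->mmap_sem */
	int vm_locked;			/* stands in for VM_LOCKED */
};

/* Mirrors the new out_unmap tail: confirm VM_LOCKED only if the
 * semaphore is available right now; otherwise defer to a retry. */
static int check_mlock(struct fake_vma *vma)
{
	int ret = SWAP_AGAIN;		/* default: let the caller retry */

	if (pthread_rwlock_tryrdlock(&vma->mmap_sem) == 0) {
		if (vma->vm_locked)
			ret = SWAP_MLOCK;  /* mlock_vma_page() would run here */
		pthread_rwlock_unlock(&vma->mmap_sem);
	}
	return ret;
}

int main(void)
{
	struct fake_vma vma = { PTHREAD_RWLOCK_INITIALIZER, 1 };

	printf("%s\n", check_mlock(&vma) == SWAP_MLOCK ?
			"SWAP_MLOCK" : "SWAP_AGAIN");
	return 0;
}
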
@@ -980,23 +992,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
-	int mlocked = 0;
-
-	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			mlock_vma_page(page);
-			mlocked++;	/* really mlocked the page */
-		}
-		up_read(&vma->vm_mm->mmap_sem);
-	}
-	return mlocked;
-}
-
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1017,42 +1012,19 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
-	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;  /* must visit all unlocked vmas */
-			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				break;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				break;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			break;
 	}
 
 	page_unlock_anon_vma(anon_vma);
-
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
-
 	return ret;
 }
 
@@ -1082,42 +1054,27 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
-	unsigned int mlocked = 0;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				goto out;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				goto out;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			goto out;
 	}
 
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto out;
 
+	/*
+	 * We don't bother to try to find the munlocked page in nonlinears.
+	 * It's costly. Instead, later, page reclaim logic may call
+	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+	 */
+	if (TTU_ACTION(flags) == TTU_MUNLOCK)
+		goto out;
+
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!(vma->vm_flags & VM_LOCKED))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
-			goto out;		/* no need to look further */
-		}
 		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
 		    (vma->vm_flags & VM_LOCKED))
 			continue;
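
Note the design choice the new comment above records for nonlinear mappings: hunting through every nonlinear vma's clusters to find one munlocked page would be costly, so the munlock path simply gives up there. That is safe because of the new out_unmap logic earlier in this patch: if page reclaim later tries to unmap the page, try_to_unmap_one() will see VM_LOCKED under mmap_sem, call mlock_vma_page(), and so restore PG_mlocked lazily.
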
@@ -1159,10 +1116,9 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
 				cursor < vma->vm_end - vma->vm_start) {
-				ret = try_to_unmap_cluster(cursor, &mapcount,
-								vma, page);
-				if (ret == SWAP_MLOCK)
-					mlocked = 2;	/* to return below */
+				if (try_to_unmap_cluster(cursor, &mapcount,
+							vma, page) == SWAP_MLOCK)
+					ret = SWAP_MLOCK;
 				cursor += CLUSTER_SIZE;
 				vma->vm_private_data = (void *) cursor;
 				if ((int)mapcount <= 0)
@@ -1183,10 +1139,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
 	return ret;
 }
 
@@ -1229,7 +1181,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * Return values are:
  *
- * SWAP_SUCCESS	- no vma's holding page mlocked.
+ * SWAP_AGAIN	- no vma is holding page mlocked, or,
  * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
  * SWAP_MLOCK	- page is now mlocked.
  */
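
With the SWAP_SUCCESS line deleted from the docblock, try_to_munlock() is down to two meaningful results, and a caller no longer has to distinguish "no mlocked vma found" from "couldn't take mmap_sem". A hypothetical caller sketch follows; the function and scenario names are invented for illustration (the in-tree caller at the time was munlock_vma_page() in mm/mlock.c):

/* Illustrative model only: how the two remaining return values of
 * try_to_munlock() are consumed after this patch. */
#include <stdio.h>

enum { SWAP_AGAIN, SWAP_MLOCK };

/* Stand-in for try_to_munlock(): SWAP_MLOCK means some VM_LOCKED vma
 * still maps the page and it was re-mlocked; SWAP_AGAIN covers both
 * "no mlocked vma" and "mmap_sem was contended" -- either way page
 * reclaim may revisit the page later. */
static int try_to_munlock_model(int mapped_in_locked_vma)
{
	return mapped_in_locked_vma ? SWAP_MLOCK : SWAP_AGAIN;
}

int main(void)
{
	if (try_to_munlock_model(0) == SWAP_MLOCK)
		printf("keep page on the unevictable list\n");
	else
		printf("release page; PG_mlocked is recovered lazily if needed\n");
	return 0;
}
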