@@ -789,10 +789,9 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * skipped over this mm) then we should reactivate it.
 	 */
 	if (!(flags & TTU_IGNORE_MLOCK)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			ret = SWAP_MLOCK;
-			goto out_unmap;
-		}
+		if (vma->vm_flags & VM_LOCKED)
+			goto out_mlock;
+
 		if (TTU_ACTION(flags) == TTU_MUNLOCK)
 			goto out_unmap;
 	}
@@ -865,18 +864,28 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+out:
+	return ret;
 
-	if (ret == SWAP_MLOCK) {
-		ret = SWAP_AGAIN;
-		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-			if (vma->vm_flags & VM_LOCKED) {
-				mlock_vma_page(page);
-				ret = SWAP_MLOCK;
-			}
-			up_read(&vma->vm_mm->mmap_sem);
+out_mlock:
+	pte_unmap_unlock(pte, ptl);
+
+
+	/*
+	 * We need mmap_sem locking here, otherwise the VM_LOCKED check is
+	 * racy and gives an unstable result. Plus, we can't wait here
+	 * because we now hold anon_vma->lock or mapping->i_mmap_lock.
+	 * If the trylock fails, the page remains on the evictable lru and
+	 * vmscan can later retry moving it to the unevictable lru if the
+	 * page is actually mlocked.
+	 */
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+		if (vma->vm_flags & VM_LOCKED) {
+			mlock_vma_page(page);
+			ret = SWAP_MLOCK;
 		}
+		up_read(&vma->vm_mm->mmap_sem);
 	}
-out:
 	return ret;
 }
 
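
The key constraint behind the new out_mlock path is that try_to_unmap_one() reaches
it while holding spinning locks (the page-table lock it just dropped aside, it still
holds anon_vma->lock or mapping->i_mmap_lock), so it may only *try* to take the
blocking mmap_sem and must fall back to "retry later" if that fails. A minimal
userspace sketch of this trylock-with-fallback pattern, using a pthread rwlock in
place of the kernel's rwsem; map_lock, flags_locked, and check_locked_nonblocking
are illustrative names, not from the patch:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int flags_locked;	/* stands in for vma->vm_flags & VM_LOCKED */

	/*
	 * Mirror of the out_mlock logic: we are in a context that must not
	 * sleep, so we use a trylock; if it fails we report "not mlocked"
	 * and let a later pass re-examine the page, as vmscan does.
	 */
	static int check_locked_nonblocking(void)
	{
		int mlocked = 0;

		if (pthread_rwlock_tryrdlock(&map_lock) == 0) {	/* never sleeps */
			if (flags_locked)	/* stable only while the lock is held */
				mlocked = 1;	/* kernel calls mlock_vma_page() here */
			pthread_rwlock_unlock(&map_lock);
		}
		return mlocked;	/* 0 can also mean "couldn't tell; retry later" */
	}

	int main(void)
	{
		flags_locked = 1;
		printf("mlocked: %d\n", check_locked_nonblocking());
		return 0;
	}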