@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	unsigned long address;
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		pgoff = page->index << huge_page_order(page_hstate(page));
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		return -EFAULT;
-	}
+
+	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address = __vma_address(page, vma);
+
+	/* page should be within @vma mapping range */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
 	return address;
 }
 
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	return vma_address(page, vma);
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+		return -EFAULT;
+	return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)		/* out of vma range */
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 		return 0;
 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 	if (!pte)			/* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -904,8 +908,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
-			if (address == -EFAULT)
-				continue;
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
@@ -1468,8 +1470,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
@@ -1508,8 +1508,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
@@ -1684,8 +1682,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
@@ -1707,8 +1703,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;