@@ -1174,41 +1174,22 @@ no_page:
 	pte_unmap_unlock(ptep, ptl);
 	if (!pte_none(pte))
 		return page;
-	/* Fall through to ZERO_PAGE handling */
+
 no_page_table:
 	/*
 	 * When core dumping an enormous anonymous area that nobody
-	 * has touched so far, we don't want to allocate page tables.
+	 * has touched so far, we don't want to allocate unnecessary pages or
+	 * page tables. Return error instead of NULL to skip handle_mm_fault,
+	 * then get_dump_page() will return NULL to leave a hole in the dump.
+	 * But we can only make this optimization where a hole would surely
+	 * be zero-filled if handle_mm_fault() actually did handle it.
 	 */
-	if (flags & FOLL_ANON) {
-		page = ZERO_PAGE(0);
-		if (flags & FOLL_GET)
-			get_page(page);
-		BUG_ON(flags & FOLL_WRITE);
-	}
+	if ((flags & FOLL_DUMP) &&
+	    (!vma->vm_ops || !vma->vm_ops->fault))
+		return ERR_PTR(-EFAULT);
 	return page;
 }
 
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
-{
-	/*
-	 * We don't want to optimize FOLL_ANON for make_pages_present()
-	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
-	 * we want to get the page from the page tables to make sure
-	 * that we serialize and update with any other user of that
-	 * mapping.
-	 */
-	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
-		return 0;
-	/*
-	 * And if we have a fault routine, it's not an anonymous region.
-	 */
-	return !vma->vm_ops || !vma->vm_ops->fault;
-}
-
-
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
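Aside, for review context rather than as part of the diff: with this change follow_page() has a three-way result under FOLL_DUMP. A GUP-style caller is expected to treat it roughly as sketched below; the surrounding variables (i, vma, start, foll_flags) are illustrative, while IS_ERR()/PTR_ERR() from <linux/err.h> are the real helpers that carry the -EFAULT back out.

	page = follow_page(vma, start, foll_flags);
	if (IS_ERR(page)) {
		/*
		 * FOLL_DUMP shortcut: nothing is mapped and nothing is worth
		 * faulting in; hand the error back up (i here would be the
		 * number of pages already pinned, as in __get_user_pages()).
		 */
		return i ? i : PTR_ERR(page);
	}
	if (!page) {
		/* Not present and no shortcut: fault it in and retry. */
	}
	/* Otherwise a real page, with a reference held if FOLL_GET was set. */
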
@@ -1288,8 +1269,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		foll_flags = FOLL_TOUCH;
 		if (pages)
 			foll_flags |= FOLL_GET;
-		if (!write && use_zero_page(vma))
-			foll_flags |= FOLL_ANON;
+		if (flags & GUP_FLAGS_DUMP)
+			foll_flags |= FOLL_DUMP;
 
 		do {
 			struct page *page;
@@ -1446,7 +1427,7 @@ struct page *get_dump_page(unsigned long addr)
 	struct page *page;
 
 	if (__get_user_pages(current, current->mm, addr, 1,
-			GUP_FLAGS_FORCE, &page, &vma) < 1)
+			GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
 		return NULL;
 	if (page == ZERO_PAGE(0)) {
 		page_cache_release(page);
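
The effect shows up in the consumer: get_dump_page() turns the ERR_PTR(-EFAULT) into a NULL return, and the ELF core dumper can then seek over the hole instead of writing a page of zeroes. Roughly, assuming the dump_write()/dump_seek() helpers that fs/binfmt_elf.c used at the time (loop variables and the size/limit accounting are omitted here):

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap(page);
			stop = !dump_write(file, kaddr, PAGE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else {
			/* never-touched anon memory: leave a hole in the core file */
			stop = !dump_seek(file, PAGE_SIZE);
		}
		if (stop)
			goto end_coredump;
	}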