|
@@ -553,24 +553,45 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
|
|
|
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
|
|
|
}
|
|
|
|
|
|
+/*
+ * Build one 64-bit /proc/pid/pagemap entry from a pte: the PFN (or
+ * swap type/offset) in the low bits, plus the page-shift field and
+ * the PRESENT/SWAP flag bits in the high bits.
+ *
+ * Must return u64, not unsigned long: the flag bits live in the top
+ * of the 64-bit entry and would be truncated away on 32-bit arches.
+ */
+static u64 pte_to_pagemap_entry(pte_t pte)
+{
+	u64 pme = 0;
+	if (is_swap_pte(pte))
+		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+	else if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
|
|
|
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|
|
struct mm_walk *walk)
|
|
|
{
|
|
|
+ struct vm_area_struct *vma;
|
|
|
struct pagemapread *pm = walk->private;
|
|
|
pte_t *pte;
|
|
|
int err = 0;
|
|
|
|
|
|
+ /* find the first VMA at or above 'addr' */
|
|
|
+ vma = find_vma(walk->mm, addr);
|
|
|
for (; addr != end; addr += PAGE_SIZE) {
|
|
|
u64 pfn = PM_NOT_PRESENT;
|
|
|
- pte = pte_offset_map(pmd, addr);
|
|
|
- if (is_swap_pte(*pte))
|
|
|
- pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
|
|
|
- | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
|
|
|
- else if (pte_present(*pte))
|
|
|
- pfn = PM_PFRAME(pte_pfn(*pte))
|
|
|
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
|
|
|
- /* unmap so we're not in atomic when we copy to userspace */
|
|
|
- pte_unmap(pte);
|
|
|
+
|
|
|
+ /* check to see if we've left 'vma' behind
|
|
|
+ * and need a new, higher one */
|
|
|
+ if (vma && (addr >= vma->vm_end))
|
|
|
+ vma = find_vma(walk->mm, addr);
|
|
|
+
|
|
|
+ /* check that 'vma' actually covers this address,
|
|
|
+ * and that it isn't a huge page vma */
|
|
|
+ if (vma && (vma->vm_start <= addr) &&
|
|
|
+ !is_vm_hugetlb_page(vma)) {
|
|
|
+ pte = pte_offset_map(pmd, addr);
|
|
|
+ pfn = pte_to_pagemap_entry(*pte);
|
|
|
+ /* unmap before userspace copy */
|
|
|
+ pte_unmap(pte);
|
|
|
+ }
|
|
|
err = add_to_pagemap(addr, pfn, pm);
|
|
|
if (err)
|
|
|
return err;
|