@@ -1055,7 +1055,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
-	if (unlikely(is_pfn_mapping(vma))) {
+	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
 		/*
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
@@ -1327,7 +1327,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (vma->vm_file)
 		uprobe_munmap(vma, start, end);
 
-	if (unlikely(is_pfn_mapping(vma)))
+	if (unlikely(vma->vm_flags & VM_PFNMAP))
 		untrack_pfn(vma, 0, 0);
 
 	if (start != end) {
@@ -2299,26 +2299,20 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * There's a horrible special case to handle copy-on-write
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
+	 * See vm_normal_page() for details.
 	 */
-	if (addr == vma->vm_start && end == vma->vm_end) {
+	if (is_cow_mapping(vma->vm_flags)) {
+		if (addr != vma->vm_start || end != vma->vm_end)
+			return -EINVAL;
 		vma->vm_pgoff = pfn;
-		vma->vm_flags |= VM_PFN_AT_MMAP;
-	} else if (is_cow_mapping(vma->vm_flags))
+	}
+
+	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+	if (err)
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_remap(vma, &prot, pfn, PAGE_ALIGN(size));
-	if (err) {
-		/*
-		 * To indicate that track_pfn related cleanup is not
-		 * needed from higher level routine calling unmap_vmas
-		 */
-		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-		vma->vm_flags &= ~VM_PFN_AT_MMAP;
-		return -EINVAL;
-	}
-
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);