
Make sure we copy pages inserted with "vm_insert_page()" on fork

The logic that decides that a fork() might be able to avoid copying a VM
area when it can be re-created by page faults didn't know about the new
vm_insert_page() case.

Also make some things a bit more anal wrt VM_PFNMAP.

Pointed out by Hugh Dickins <hugh@veritas.com>

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Linus Torvalds, 19 years ago
commit 4d7672b462
4 changed files with 5 additions and 3 deletions
  include/linux/mm.h   +1 -0
  mm/memory.c          +2 -1
  mm/mmap.c            +1 -1
  mm/mremap.c          +1 -1
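
For context, the case this commit fixes looks like the following hypothetical driver mmap handler (a sketch, not code from this commit; mydrv_buffer_page is an assumed driver-owned page). Pages mapped this way are not backed by a file that the fault path can read back, so fork() must copy their page table entries eagerly:

/* Hypothetical driver mmap handler: maps a driver-owned kernel page
 * into userspace with vm_insert_page(). After this commit, that call
 * also tags the vma with VM_INSERTPAGE, so fork() knows the area
 * cannot be re-created by page faults and must be copied. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = mydrv_buffer_page;	/* assumed, driver-owned */

	/* Fails with -EINVAL if the page has a zero refcount, or
	 * -EFAULT if the address is outside the vma (see mm/memory.c
	 * below). */
	return vm_insert_page(vma, vma->vm_start, page);
}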

+ 1 - 0
include/linux/mm.h

@@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */

 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
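
VM_INSERTPAGE claims bit 25, the next free flag bit after VM_MAPPED_COPY (bit 24). A quick illustrative sanity check of the bit values (not part of the commit):

/* Somewhere inside a function, e.g. an init routine:
 * 0x01000000 << 1 == 0x02000000, so VM_INSERTPAGE is the next bit up. */
BUILD_BUG_ON(VM_INSERTPAGE != (VM_MAPPED_COPY << 1));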

+ 2 - 1
mm/memory.c

@@ -574,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -1228,6 +1228,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
+	vma->vm_flags |= VM_INSERTPAGE;
 	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
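
Read together, the two hunks enforce one rule: a vma may only be skipped at fork() if every pte in it can be rebuilt by the fault path. A rough restatement of that decision as a standalone predicate (an illustrative sketch, not code from the kernel):

/* Sketch: may fork() skip copying this vma's page tables?
 * Hugetlb, non-linear, raw-pfn and vm_insert_page() mappings have
 * ptes the fault path cannot reconstruct, so they must be copied. */
static int fork_can_skip_copy(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))
		return 0;
	/* No anon_vma means no private anonymous pages; everything
	 * left can be re-faulted from the backing file on demand. */
	return !vma->anon_vma;
}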

+ 1 - 1
mm/mmap.c

@@ -611,7 +611,7 @@ again:			remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
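
With VM_PFNMAP added to VM_SPECIAL, vma_merge() now refuses to coalesce adjacent pfn mappings, since merging would blur which ranges carry raw pfn state. A simplified sketch of the gate (illustrative; the real check sits at the top of vma_merge(), alongside the is_mergeable_vma() tests shown above):

/* Sketch: any VM_SPECIAL bit disqualifies a vma from merging. */
static int may_merge(unsigned long vm_flags)
{
	if (vm_flags & VM_SPECIAL)	/* now includes VM_PFNMAP */
		return 0;
	return 1;
}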

+ 1 - 1
mm/mremap.c

@@ -323,7 +323,7 @@ unsigned long do_mremap(unsigned long addr,
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto out;
-	if (vma->vm_flags & VM_DONTEXPAND) {
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
 		if (new_len > old_len)
 			goto out;
 	}
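
From userspace, the mm/mremap.c change means mremap() now refuses to grow a pfn-based mapping (e.g. device memory mapped via remap_pfn_range()), because the kernel cannot fault in pfn-mapped pages for the new tail. A hedged sketch, assuming a hypothetical device node /dev/mydev whose driver establishes a VM_PFNMAP mapping (error handling trimmed for brevity):

#define _GNU_SOURCE		/* for mremap() */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/dev/mydev", O_RDWR);	/* hypothetical device */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	/* Growing is what the new check forbids for VM_PFNMAP vmas;
	 * shrinking, or moving at the same size, is still allowed. */
	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		perror("mremap");	/* expected after this commit */
	return 0;
}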