
Remove (at least temporarily) the "incomplete PFN mapping" support

With the previous commit, we can handle arbitrary shared re-mappings
even without this complexity, and since the only known private mappings
are for strange users of /dev/mem (which never create an incomplete one),
there seems to be no reason to support it.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Linus Torvalds 19 years ago
parent
commit
7fc7e2eeec
2 changed files with 1 addition and 46 deletions
  1. include/linux/mm.h: 0 additions, 1 deletion
  2. mm/memory.c: 1 addition, 45 deletions

+ 0 - 1
include/linux/mm.h

@@ -163,7 +163,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#define VM_INCOMPLETE	0x02000000	/* Strange partial PFN mapping marker */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS

+ 1 - 45
mm/memory.c

@@ -1227,50 +1227,6 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-/*
- * Somebody does a pfn remapping that doesn't actually work as a vma.
- *
- * Do it as individual pages instead, and warn about it. It's bad form,
- * and very inefficient.
- */
-static int incomplete_pfn_remap(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end,
-		unsigned long pfn, pgprot_t prot)
-{
-	static int warn = 10;
-	struct page *page;
-	int retval;
-
-	if (!(vma->vm_flags & VM_INCOMPLETE)) {
-		if (warn) {
-			warn--;
-			printk("%s does an incomplete pfn remapping", current->comm);
-			dump_stack();
-		}
-	}
-	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
-
-	if (start < vma->vm_start || end > vma->vm_end)
-		return -EINVAL;
-
-	if (!pfn_valid(pfn))
-		return -EINVAL;
-
-	page = pfn_to_page(pfn);
-	if (!PageReserved(page))
-		return -EINVAL;
-
-	retval = 0;
-	while (start < end) {
-		retval = insert_page(vma->vm_mm, start, page, prot);
-		if (retval < 0)
-			break;
-		start += PAGE_SIZE;
-		page++;
-	}
-	return retval;
-}
-
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1365,7 +1321,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 */
 	if (!(vma->vm_flags & VM_SHARED)) {
 		if (addr != vma->vm_start || end != vma->vm_end)
-			return incomplete_pfn_remap(vma, addr, end, pfn, prot);
+			return -EINVAL;
 		vma->vm_pgoff = pfn;
 	}
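
For context, a minimal sketch (not part of this commit) of the mapping style that remap_pfn_range() still accepts after this change: a driver mmap handler that covers the entire VMA in a single call. The handler name and physical base address (example_mmap, EXAMPLE_PHYS_BASE) are hypothetical, purely for illustration.

/*
 * Illustrative only -- example_mmap and EXAMPLE_PHYS_BASE are made up.
 */
#include <linux/fs.h>
#include <linux/mm.h>

#define EXAMPLE_PHYS_BASE	0x10000000UL	/* hypothetical device base address */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * Cover the whole VMA, vm_start to vm_end.  A private mapping that
	 * remaps only part of the VMA now gets -EINVAL from remap_pfn_range()
	 * instead of falling back to the removed incomplete_pfn_remap().
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       EXAMPLE_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

Drivers that genuinely need to insert individual pages can use vm_insert_page() (exported just above the removed code) rather than relying on the old page-by-page fallback.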