
[PATCH] freepgt: hugetlb area is clean

Once we're strict about clearing away page tables, hugetlb_prefault can assume
there are no page tables left within its range.  Since the other arches
continue if !pte_none here, let i386 do the same.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Hugh Dickins, 20 years ago
Parent commit: 021740dc30
2 files changed, 2 insertions(+), 46 deletions(-)
  1. arch/i386/mm/hugetlbpage.c   (+2, -9)
  2. arch/ppc64/mm/hugetlbpage.c  (+0, -37)

arch/i386/mm/hugetlbpage.c  (+2, -9)

@@ -249,15 +249,8 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 
-		if (!pte_none(*pte)) {
-			pmd_t *pmd = (pmd_t *) pte;
-
-			page = pmd_page(*pmd);
-			pmd_clear(pmd);
-			mm->nr_ptes--;
-			dec_page_state(nr_page_table_pages);
-			page_cache_release(page);
-		}
+		if (!pte_none(*pte))
+			continue;
 
 		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
 			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

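To make the i386 hunk above concrete: before this patch, a non-empty slot was assumed to be a stale page table and was torn down before the huge page was set up; afterwards a non-empty slot means the huge page is already there, so the loop simply moves on, matching the other architectures. Below is a minimal, self-contained userspace sketch of that control-flow change only; the array, the names prefault_old/prefault_new, and the malloc/free calls are invented stand-ins for the kernel's page-table and huge-page machinery, not actual kernel code.

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4

/* Toy stand-in for the huge-page slots of one mapping; NULL plays the
 * role of pte_none(). */
static void *slots[NSLOTS];

/* Old i386 behaviour: a populated slot was treated as a leftover page
 * table, torn down, and then refilled. */
static void prefault_old(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (slots[i]) {
			free(slots[i]);	/* loose analogue of pmd_clear() + page_cache_release() */
			slots[i] = NULL;
		}
		slots[i] = malloc(16);	/* loose analogue of installing the huge page */
	}
}

/* New behaviour (this patch): a populated slot already holds the huge
 * page, so simply continue, as the other arches do. */
static void prefault_new(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (slots[i])
			continue;
		slots[i] = malloc(16);
	}
}

int main(void)
{
	slots[1] = malloc(16);		/* pretend slot 1 was faulted in earlier */
	prefault_new();
	for (int i = 0; i < NSLOTS; i++)
		printf("slot %d: %s\n", i, slots[i] ? "populated" : "empty");
	for (int i = 0; i < NSLOTS; i++)
		free(slots[i]);
	(void)prefault_old;		/* kept only for side-by-side comparison */
	return 0;
}

The only difference that matters is the body of the if: teardown-and-refill in the old version versus continue in the new one.
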
arch/ppc64/mm/hugetlbpage.c  (+0, -37)

@@ -203,8 +203,6 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	unsigned long start = seg << SID_SHIFT;
 	unsigned long end = (seg+1) << SID_SHIFT;
 	struct vm_area_struct *vma;
-	unsigned long addr;
-	struct mmu_gather *tlb;
 
 	BUG_ON(seg >= 16);
 
@@ -213,41 +211,6 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	if (vma && (vma->vm_start < end))
 		return -EBUSY;
 
-	/* Clean up any leftover PTE pages in the region */
-	spin_lock(&mm->page_table_lock);
-	tlb = tlb_gather_mmu(mm, 0);
-	for (addr = start; addr < end; addr += PMD_SIZE) {
-		pgd_t *pgd = pgd_offset(mm, addr);
-		pmd_t *pmd;
-		struct page *page;
-		pte_t *pte;
-		int i;
-
-		if (pgd_none(*pgd))
-			continue;
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd || pmd_none(*pmd))
-			continue;
-		if (pmd_bad(*pmd)) {
-			pmd_ERROR(*pmd);
-			pmd_clear(pmd);
-			continue;
-		}
-		pte = (pte_t *)pmd_page_kernel(*pmd);
-		/* No VMAs, so there should be no PTEs, check just in case. */
-		for (i = 0; i < PTRS_PER_PTE; i++) {
-			BUG_ON(!pte_none(*pte));
-			pte++;
-		}
-		page = pmd_page(*pmd);
-		pmd_clear(pmd);
-		mm->nr_ptes--;
-		dec_page_state(nr_page_table_pages);
-		pte_free_tlb(tlb, page);
-	}
-	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
-
 	return 0;
 }
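
The ppc64 hunk removes the corresponding defensive sweep: with page tables now freed together with their mappings, prepare_low_seg_for_htlb only has to refuse a low segment that still has a normal VMA mapped into it. The following is a rough, self-contained userspace sketch of that remaining check, with a sorted array of ranges standing in for the mm's VMA list and find_range standing in for find_vma; all names here are made up for illustration.

#include <errno.h>
#include <stdio.h>

struct range { unsigned long start, end; };

/* Toy stand-in for find_vma(): first range whose end lies above addr
 * (assumes the array is sorted by address). */
static const struct range *find_range(const struct range *r, int n,
				      unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (r[i].end > addr)
			return &r[i];
	return NULL;
}

/* After the patch, the function reduces to this overlap check: the
 * page-table cleanup loop is gone because nothing can be left behind. */
static int prepare_low_seg(const struct range *vmas, int n,
			   unsigned long start, unsigned long end)
{
	const struct range *vma = find_range(vmas, n, start);

	if (vma && vma->start < end)
		return -EBUSY;	/* segment still covered by a normal mapping */
	return 0;
}

int main(void)
{
	struct range vmas[] = { { 0x00100000UL, 0x00200000UL } };

	/* A mapping inside the low segment makes it busy (-EBUSY)... */
	printf("%d\n", prepare_low_seg(vmas, 1, 0x00000000UL, 0x10000000UL));
	/* ...while a segment above every mapping is free to use (0). */
	printf("%d\n", prepare_low_seg(vmas, 1, 0x10000000UL, 0x20000000UL));
	return 0;
}
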