@@ -24,8 +24,9 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <linux/io.h>
+#include <asm/tlb.h>
 
+#include <linux/io.h>
 #include <linux/hugetlb.h>
 #include <linux/node.h>
 #include "internal.h"
@@ -2310,30 +2311,26 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 	return 0;
 }
 
-void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end, struct page *ref_page)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			    unsigned long start, unsigned long end,
+			    struct page *ref_page)
 {
+	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
 	struct page *page;
-	struct page *tmp;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 
-	/*
-	 * A page gathering list, protected by per file i_mmap_mutex. The
-	 * lock is used to avoid list corruption from multiple unmapping
-	 * of the same page since we are using page->lru.
-	 */
-	LIST_HEAD(page_list);
-
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
 	BUG_ON(end & ~huge_page_mask(h));
 
+	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, start, end);
+again:
 	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
@@ -2372,30 +2369,45 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
+		tlb_remove_tlb_entry(tlb, ptep, address);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
-		list_add(&page->lru, &page_list);
 
+		page_remove_rmap(page);
+		force_flush = !__tlb_remove_page(tlb, page);
+		if (force_flush)
+			break;
 		/* Bail out after unmapping reference page if supplied */
 		if (ref_page)
 			break;
 	}
-	flush_tlb_range(vma, start, end);
 	spin_unlock(&mm->page_table_lock);
-	mmu_notifier_invalidate_range_end(mm, start, end);
-	list_for_each_entry_safe(page, tmp, &page_list, lru) {
-		page_remove_rmap(page);
-		list_del(&page->lru);
-		put_page(page);
+	/*
+	 * mmu_gather ran out of room to batch pages, we break out of
+	 * the PTE lock to avoid doing the potential expensive TLB invalidate
+	 * and page-free while holding it.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu(tlb);
+		if (address < end && !ref_page)
+			goto again;
 	}
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_end_vma(tlb, vma);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-	__unmap_hugepage_range(vma, start, end, ref_page);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	struct mm_struct *mm;
+	struct mmu_gather tlb;
+
+	mm = vma->vm_mm;
+
+	tlb_gather_mmu(&tlb, mm, 0);
+	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /*
@@ -2440,9 +2452,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * from the time of fork. This would look like data corruption
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-			__unmap_hugepage_range(iter_vma,
-				address, address + huge_page_size(h),
-				page);
+			unmap_hugepage_range(iter_vma, address,
+					     address + huge_page_size(h), page);
 	}
 	mutex_unlock(&mapping->i_mmap_mutex);
 
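
A note on the idiom the patch adopts (an illustration, not part of the patch):
rather than accumulating hugepages on a private page_list and freeing them
after dropping the PTE lock, the unmap path now feeds each page into a
caller-supplied mmu_gather. The fragment below sketches the caller-side
pattern under this era's API, where tlb_gather_mmu() takes a full-mm flag as
its third argument and tlb_finish_mmu() takes the range; names are those used
in the hunks above.

	struct mmu_gather tlb;

	/* 0 => not a full-mm teardown; batches may be flushed incrementally */
	tlb_gather_mmu(&tlb, vma->vm_mm, 0);

	/*
	 * Queues PTE invalidations via tlb_remove_tlb_entry() and pages via
	 * __tlb_remove_page(); may flush mid-range if a batch fills up.
	 */
	__unmap_hugepage_range(&tlb, vma, start, end, NULL);

	/* final TLB flush plus freeing of all still-batched pages */
	tlb_finish_mmu(&tlb, start, end);

The retry loop in the second hunk exists because __tlb_remove_page() returns 0
once the gather can no longer batch pages; the code then drops
page_table_lock, calls tlb_flush_mmu() to drain the batch, and jumps back to
the "again" label, so the potentially expensive flush and page-free never
happen under the lock.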