@@ -2429,6 +2429,25 @@ again:
 	tlb_end_vma(tlb, vma);
 }
 
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, struct page *ref_page)
+{
+	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+	/*
+	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
+	 * test will fail on a vma being torn down, and not grab a page table
+	 * on its way out. We're lucky that the flag has such an appropriate
+	 * name, and can in fact be safely cleared here. We could clear it
+	 * before the __unmap_hugepage_range above, but all that's necessary
+	 * is to clear it before releasing the i_mmap_mutex. This works
+	 * because in the context this is called, the VMA is about to be
+	 * destroyed and the i_mmap_mutex is held.
+	 */
+	vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
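
For context, the test the new comment relies on lives in arch/x86/mm/hugetlbpage.c: huge_pmd_share() walks the mapping's i_mmap list and only shares a pud-sized page table with a sibling VMA whose flags and layout match. The sketch below is reconstructed from memory of that era's x86 code, not part of this patch, so treat the bodies as approximate; the point is that once VM_MAYSHARE is cleared on the VMA being torn down, vma_shareable() rejects it, and the vm_flags comparison in page_table_shareable() stops every other faulting VMA from picking it as a sharing candidate.

/*
 * Simplified sketch (not verbatim) of the x86 sharing tests that the
 * comment in __unmap_hugepage_range_final() refers to.
 */
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/* only VM_MAYSHARE mappings spanning a whole pud may share */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

static unsigned long page_table_shareable(struct vm_area_struct *svma,
					  struct vm_area_struct *vma,
					  unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* allow segments to share even if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * Match the virtual addresses, permissions and alignment of the
	 * page table page.  A vma being torn down with VM_MAYSHARE
	 * cleared can no longer match any VM_MAYSHARE vma here.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

The "context this is called" mentioned in the comment is final teardown (exit/munmap), where, per the comment itself, the caller already holds i_mmap_mutex around __unmap_hugepage_range_final(), so no new sharer can grab the dying VMA's page table between the unmap and the flag clear.
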
@@ -3012,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+	/*
+	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+	 * may have cleared our pud entry and done put_page on the page table:
+	 * once we release i_mmap_mutex, another task can do the final put_page
+	 * and that page table be reused and filled with junk.
+	 */
 	flush_tlb_range(vma, start, end);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
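
The second hunk changes no logic, only the order of flush_tlb_range() and mutex_unlock(). Why that order matters is easier to see next to x86's huge_pmd_unshare(), sketched below from memory of the same era's arch/x86/mm/hugetlbpage.c (an approximation, not text from this patch): it clears the pud entry and drops one reference on the shared pmd page table page, and only i_mmap_mutex keeps the remaining sharer from dropping the last reference.

/*
 * Sketch (not verbatim) of the x86 huge_pmd_unshare() the comment
 * refers to.  Called with page_table_lock and i_mmap_mutex held.
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;	/* not shared: nothing to unshare */

	/* unhook the shared pmd page and drop our reference to it */
	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

So the TLB entries that still point through the now-cleared pud must be flushed while i_mmap_mutex still pins the page table page; unlocking first would let another task do the final put_page(), after which the page could be reused and refilled underneath a stale TLB entry.
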