@@ -1075,8 +1075,16 @@ pmd_t *page_check_address_pmd(struct page *page,
 		goto out;
 	if (pmd_page(*pmd) != page)
 		goto out;
-	VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
-		  pmd_trans_splitting(*pmd));
+	/*
+	 * split_vma() may create temporary aliased mappings. There is
+	 * no risk as long as all huge pmds are found and have their
+	 * splitting bit set before __split_huge_page_refcount
+	 * runs. Finding the same huge pmd more than once during the
+	 * same rmap walk is not a problem.
+	 */
+	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
+	    pmd_trans_splitting(*pmd))
+		goto out;
 	if (pmd_trans_huge(*pmd)) {
 		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
 			  !pmd_trans_splitting(*pmd));
@@ -2196,3 +2204,80 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 	put_page(page);
 	BUG_ON(pmd_trans_huge(*pmd));
 }
+
+static void split_huge_page_address(struct mm_struct *mm,
+				    unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
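+	/*
+	 * An hpage aligned address never needs a split here: the
+	 * callers only pass in misaligned vma boundaries.
+	 */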
+	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
+
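+	/* Walk the page tables; bail out if any level is not present. */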
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		return;
+	/*
+	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
+	 * materialize from under us.
+	 */
+	split_huge_page_pmd(mm, pmd);
+}
+
+void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+			     unsigned long start,
+			     unsigned long end,
+			     long adjust_next)
+{
+	/*
+	 * If the new start address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
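+	/*
+	 * With 2MB hpages for example, start == 0x1201000 needs a split
+	 * only if [0x1200000, 0x1400000) lies fully inside the vma.
+	 */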
+	if (start & ~HPAGE_PMD_MASK &&
+	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, start);
+
+	/*
+	 * If the new end address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (end & ~HPAGE_PMD_MASK &&
+	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, end);
+
+	/*
+	 * If we're also updating vma->vm_next->vm_start, and if the new
+	 * vm_next->vm_start isn't hpage aligned and it could previously
+	 * contain a hugepage: check if we need to split a huge pmd.
+	 */
+	if (adjust_next > 0) {
+		struct vm_area_struct *next = vma->vm_next;
+		unsigned long nstart = next->vm_start;
+		nstart += adjust_next << PAGE_SHIFT;
+		if (nstart & ~HPAGE_PMD_MASK &&
+		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
+		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
+			split_huge_page_address(next->vm_mm, nstart);
+	}
+}
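
For illustration only, here is a minimal userspace sketch of the boundary
test used by __vma_adjust_trans_huge() above. The 2MiB HPAGE_PMD_SIZE
value and the needs_split() helper are assumptions made for this example
(matching x86-64 with 4KiB base pages), not kernel API:

	#include <stdio.h>

	/* Illustrative: 2MiB huge pmds, as on x86-64 with 4KiB pages. */
	#define HPAGE_PMD_SIZE	(2UL << 20)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	/*
	 * Mirrors the check above: a new vma boundary can cut a huge
	 * pmd only if it is misaligned and the hpage-sized range that
	 * contains it fits entirely inside [vm_start, vm_end).
	 */
	static int needs_split(unsigned long addr, unsigned long vm_start,
			       unsigned long vm_end)
	{
		return (addr & ~HPAGE_PMD_MASK) &&
		       (addr & HPAGE_PMD_MASK) >= vm_start &&
		       (addr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vm_end;
	}

	int main(void)
	{
		unsigned long vm_start = 0x500000, vm_end = 0x1000000;

		/* 1: misaligned boundary, containing hpage inside the vma */
		printf("%d\n", needs_split(0x601000, vm_start, vm_end));
		/* 0: hpage aligned boundary, nothing to split */
		printf("%d\n", needs_split(0x600000, vm_start, vm_end));
		/* 0: containing hpage extends below vm_start */
		printf("%d\n", needs_split(0x501000, vm_start, vm_end));
		return 0;
	}

Note how all three conditions must hold before split_huge_page_pmd() is
worth calling: an aligned boundary cannot cut a huge pmd, and a boundary
whose containing hpage range crosses the vma edges could never have been
mapped by one in the first place.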