@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		ret = 0;
 		goto out_unlock;
 	}
+
+	/* mmap_sem prevents this happening but warn if that changes */
+	WARN_ON(pmd_trans_migrating(pmd));
+
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
 		spin_unlock(src_ptl);
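
pmd_trans_migrating() is not defined in this file; it is introduced elsewhere in the same series (in mm/migrate.c, with declarations in include/linux/migrate.h). As a rough sketch, assuming the series' convention that the THP being migrated stays page-locked for the duration of the move, the predicate reduces to a page-lock test:

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);

	/* migration holds the page lock while the pmd is in flux */
	return PageLocked(page);
}

The WARN_ON() above is defensive: copy_huge_pmd() runs during fork() under the write side of mmap_sem, which excludes the NUMA hinting faults that trigger THP migration, so a migrating pmd here would mean the locking rules have changed.
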
@@ -1299,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
+	/*
+	 * If there are potential migrations, wait for completion and retry
+	 * without disrupting NUMA hinting information. Do not relock and
+	 * check_same as the page may no longer be mapped.
+	 */
+	if (unlikely(pmd_trans_migrating(*pmdp))) {
+		spin_unlock(ptl);
+		wait_migrate_huge_page(vma->anon_vma, pmdp);
+		goto out;
+	}
+
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
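
wait_migrate_huge_page() also comes from elsewhere in the series. Under the same page-lock convention, a plausible sketch is a plain sleep on the page lock, with the anon_vma argument mirroring the signature of wait_split_huge_page():

void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);

	/* released by the migration path once the copy is complete */
	wait_on_page_locked(page);
}

Note that the fault drops the ptl before waiting and then bails out through out rather than relocking: migration may have unmapped the page, so a pmd_same() recheck could not be trusted, and the retried access simply observes whatever pmd migration installed.
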
@@ -1329,12 +1344,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * If there are potential migrations, wait for completion and retry. We
-	 * do not relock and check_same as the page may no longer be mapped.
-	 * Furtermore, even if the page is currently misplaced, there is no
-	 * guarantee it is still misplaced after the migration completes.
-	 */
+	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
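
The comment swap in this hunk is worth unpacking: the early pmd_trans_migrating() check and this !page_locked path are not redundant, because the ptl only serialises pmd updates. A parallel migration takes the page lock first and acquires the ptl afterwards to rewrite the pmd; purely as an illustration of that ordering (not the series' exact code):

/* migration side, simplified ordering */
lock_page(page);	/* observed by pmd_trans_migrating()/trylock_page() */
ptl = pmd_lock(mm, pmd);
/* ... rewrite the pmd, copy the data to the target node ... */
spin_unlock(ptl);
unlock_page(page);	/* releases wait_on_page_locked() waiters */

The page can therefore become locked after the pmd_trans_migrating() check even though the fault path holds the ptl the whole time; the shorter replacement comment records exactly that window.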