@@ -1052,6 +1052,51 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	return ret;
 }
 
+int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
+		  unsigned long old_addr,
+		  unsigned long new_addr, unsigned long old_end,
+		  pmd_t *old_pmd, pmd_t *new_pmd)
+{
+	int ret = 0;
+	pmd_t pmd;
+
+	struct mm_struct *mm = vma->vm_mm;
+
+	if ((old_addr & ~HPAGE_PMD_MASK) ||
+	    (new_addr & ~HPAGE_PMD_MASK) ||
+	    old_end - old_addr < HPAGE_PMD_SIZE ||
+	    (new_vma->vm_flags & VM_NOHUGEPAGE))
+		goto out;
+
+	/*
+	 * The destination pmd shouldn't be established; free_pgtables()
+	 * should have released it.
+	 */
+	if (WARN_ON(!pmd_none(*new_pmd))) {
+		VM_BUG_ON(pmd_trans_huge(*new_pmd));
+		goto out;
+	}
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*old_pmd))) {
+		if (pmd_trans_splitting(*old_pmd)) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, old_pmd);
+			ret = -1;
+		} else {
+			pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+			VM_BUG_ON(!pmd_none(*new_pmd));
+			set_pmd_at(mm, new_addr, new_pmd, pmd);
+			spin_unlock(&mm->page_table_lock);
+			ret = 1;
+		}
+	} else {
+		spin_unlock(&mm->page_table_lock);
+	}
+out:
+	return ret;
+}
+
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, pgprot_t newprot)
 {
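For context, a rough sketch of how a caller on the mremap path might consume
move_huge_pmd()'s tri-state return value: 1 means the whole huge pmd was moved
in one shot, -1 means a concurrent split was waited on (the pmd is no longer
huge afterwards), and 0 means the range did not qualify, so the caller must
split and fall back to moving ptes. This is an illustrative reconstruction,
not part of the patch; the surrounding loop variables (extent, old_pmd,
new_pmd) and the placement inside a move_page_tables()-style loop are
assumptions.

	/* Hypothetical caller-side handling; names are illustrative. */
	if (extent == HPAGE_PMD_SIZE) {
		int err = move_huge_pmd(vma, new_vma, old_addr, new_addr,
					old_end, old_pmd, new_pmd);
		if (err > 0)
			continue;	/* whole huge pmd moved wholesale */
		if (!err)
			/* not movable as a unit: split, then copy ptes */
			split_huge_page_pmd(vma->vm_mm, old_pmd);
		/* err == -1: move_huge_pmd() already waited for the split */
		VM_BUG_ON(pmd_trans_huge(*old_pmd));
	}
	/* fall through to the regular pte-by-pte move */

Note the design choice this implies: returning -1 rather than 0 after
wait_split_huge_page() lets the caller skip the redundant split call, since
the pmd is guaranteed to be a regular page-table pointer by then.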