@@ -690,11 +690,10 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 	return pmd;
 }
 
-static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
 {
 	pmd_t entry;
-	entry = mk_pmd(page, vma->vm_page_prot);
-	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	entry = mk_pmd(page, prot);
 	entry = pmd_mkhuge(entry);
 	return entry;
 }
@@ -727,7 +726,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		pte_free(mm, pgtable);
 	} else {
 		pmd_t entry;
-		entry = mk_huge_pmd(page, vma);
+		entry = mk_huge_pmd(page, vma->vm_page_prot);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		page_add_new_anon_rmap(page, vma, haddr);
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		set_pmd_at(mm, haddr, pmd, entry);
@@ -1210,7 +1210,8 @@ alloc:
 		goto out_mn;
 	} else {
 		pmd_t entry;
-		entry = mk_huge_pmd(new_page, vma);
+		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
@@ -2358,7 +2359,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = mk_huge_pmd(new_page, vma);
+	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
+	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
 
 	/*
 	 * spin_lock() below is not the equivalent of smp_wmb(), so