@@ -606,6 +606,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 	return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+	pmd_t entry;
+	entry = mk_pmd(page, vma->vm_page_prot);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	entry = pmd_mkhuge(entry);
+	return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long haddr, pmd_t *pmd,
@@ -629,9 +638,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		pte_free(mm, pgtable);
 	} else {
 		pmd_t entry;
-		entry = mk_pmd(page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		entry = pmd_mkhuge(entry);
+		entry = mk_huge_pmd(page, vma);
 		/*
 		 * The spinlocking to take the lru_lock inside
 		 * page_add_new_anon_rmap() acts as a full memory
@@ -951,9 +958,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else {
 		pmd_t entry;
 		VM_BUG_ON(!PageHead(page));
-		entry = mk_pmd(new_page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		entry = pmd_mkhuge(entry);
+		entry = mk_huge_pmd(new_page, vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
@@ -2000,9 +2005,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = mk_pmd(new_page, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-	_pmd = pmd_mkhuge(_pmd);
+	_pmd = mk_huge_pmd(new_page, vma);
 
 	/*
 	 * spin_lock() below is not the equivalent of smp_wmb(), so