@@ -132,6 +132,7 @@ static void update_and_free_page(struct page *page)
 	}
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
+	arch_release_hugepage(page);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
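The new arch_release_hugepage() hook gives an architecture a chance to undo any per-page preparation before the huge page goes back to the buddy allocator. On an architecture with nothing to undo, the fallback in its hugetlb header can presumably be an empty inline; a minimal sketch of such a fallback (the trivial body is an assumption, only the hook name comes from this patch):

	static inline void arch_release_hugepage(struct page *page)
	{
		/* no arch-specific state to tear down */
	}
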
@@ -201,6 +202,10 @@ static struct page *alloc_fresh_huge_page_node(int nid)
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
 		HUGETLB_PAGE_ORDER);
 	if (page) {
+		if (arch_prepare_hugepage(page)) {
+			__free_pages(page, HUGETLB_PAGE_ORDER);
+			return 0;
+		}
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
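arch_prepare_hugepage() is the allocation-side counterpart: a nonzero return means the architecture could not set the page up for huge-page use, so the freshly allocated compound page is freed and the allocation reported as failed before any hugetlb accounting happens. A do-nothing fallback would look like this (a sketch of the assumed trivial case):

	static inline int arch_prepare_hugepage(struct page *page)
	{
		return 0;	/* success: no preparation needed */
	}
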
@@ -735,7 +740,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 		entry =
 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 	} else {
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
 	}
 	entry = pte_mkyoung(entry);
 	entry = pte_mkhuge(entry);
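huge_pte_wrprotect() replaces the direct pte_wrprotect() call so that an architecture can write-protect a huge PTE differently from an ordinary one. Where no such distinction exists, the wrapper presumably degenerates to the generic op, along these lines (sketch):

	static inline pte_t huge_pte_wrprotect(pte_t pte)
	{
		return pte_wrprotect(pte);
	}
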
@@ -748,8 +753,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
 	pte_t entry;
 
-	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
 		update_mmu_cache(vma, address, entry);
 	}
 }
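The same pattern covers reads and access-flag updates: huge_ptep_get() abstracts the bare *ptep dereference (an architecture need not store huge PTEs in ordinary PTE format), and huge_ptep_set_access_flags() wraps ptep_set_access_flags(). Plausible trivial fallbacks for the generic case (bodies assumed, signatures matched to the call sites above):

	static inline pte_t huge_ptep_get(pte_t *ptep)
	{
		return *ptep;
	}

	static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
						     unsigned long addr,
						     pte_t *ptep, pte_t pte,
						     int dirty)
	{
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	}
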
@@ -779,10 +784,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 		spin_lock(&dst->page_table_lock);
 		spin_lock(&src->page_table_lock);
-		if (!pte_none(*src_pte)) {
+		if (!huge_pte_none(huge_ptep_get(src_pte))) {
 			if (cow)
-				ptep_set_wrprotect(src, addr, src_pte);
-			entry = *src_pte;
+				huge_ptep_set_wrprotect(src, addr, src_pte);
+			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
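copy_hugetlb_page_range() picks up the remaining two wrappers: huge_pte_none() for the emptiness test and huge_ptep_set_wrprotect() for the in-place COW write-protect of the source mapping. Again, sketches of the assumed trivial fallbacks:

	static inline int huge_pte_none(pte_t pte)
	{
		return pte_none(pte);
	}

	static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep)
	{
		ptep_set_wrprotect(mm, addr, ptep);
	}
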
@@ -826,7 +831,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			continue;
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
-		if (pte_none(pte))
+		if (huge_pte_none(pte))
 			continue;
 
 		page = pte_page(pte);
@@ -890,7 +895,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
-	if (likely(pte_same(*ptep, pte))) {
+	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
 		set_huge_pte_at(mm, address, ptep,
@@ -960,7 +965,7 @@ retry:
 		goto backout;
 
 	ret = 0;
-	if (!pte_none(*ptep))
+	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -1002,8 +1007,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * the same page in the page cache.
 	 */
 	mutex_lock(&hugetlb_instantiation_mutex);
-	entry = *ptep;
-	if (pte_none(entry)) {
+	entry = huge_ptep_get(ptep);
+	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
 		mutex_unlock(&hugetlb_instantiation_mutex);
 		return ret;
@@ -1013,7 +1018,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
-	if (likely(pte_same(entry, *ptep)))
+	if (likely(pte_same(entry, huge_ptep_get(ptep))))
 		if (write_access && !pte_write(entry))
 			ret = hugetlb_cow(mm, vma, address, ptep, entry);
 	spin_unlock(&mm->page_table_lock);
@@ -1043,7 +1048,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
+		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+		    (write && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
 			spin_unlock(&mm->page_table_lock);
@@ -1059,7 +1065,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
-		page = pte_page(*pte);
+		page = pte_page(huge_ptep_get(pte));
 same_page:
 		if (pages) {
 			get_page(page);
@@ -1108,7 +1114,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 			continue;
 		if (huge_pmd_unshare(mm, &address, ptep))
 			continue;
-		if (!pte_none(*ptep)) {
+		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
 			set_huge_pte_at(mm, address, ptep, pte);
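The point of routing every huge-PTE access through these wrappers is that an architecture may keep its huge-page entries in a format that cannot simply be dereferenced as a pte_t, for example an entry stored at the segment/PMD level of the page table. Such an architecture would override huge_ptep_get() to translate; purely for illustration (hypothetical architecture, arch_pmd_entry_to_pte() is an invented helper name):

	static inline pte_t huge_ptep_get(pte_t *ptep)
	{
		/* hypothetical: the entry actually lives in PMD format */
		return arch_pmd_entry_to_pte(*(pmd_t *)ptep);
	}

Every former *ptep dereference in the common code above now goes through this single choke point, which is what the bulk of this diff accomplishes.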