@@ -785,55 +785,54 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pte_t *pte;
 
-	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
-		if (unlikely(anon_vma_prepare(vma)))
-			return VM_FAULT_OOM;
-		if (unlikely(khugepaged_enter(vma)))
+	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+		goto out;
+	if (unlikely(anon_vma_prepare(vma)))
+		return VM_FAULT_OOM;
+	if (unlikely(khugepaged_enter(vma)))
+		return VM_FAULT_OOM;
+	if (!(flags & FAULT_FLAG_WRITE) &&
+			transparent_hugepage_use_zero_page()) {
+		pgtable_t pgtable;
+		struct page *zero_page;
+		bool set;
+		pgtable = pte_alloc_one(mm, haddr);
+		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
-		if (!(flags & FAULT_FLAG_WRITE) &&
-				transparent_hugepage_use_zero_page()) {
-			pgtable_t pgtable;
-			struct page *zero_page;
-			bool set;
-			pgtable = pte_alloc_one(mm, haddr);
-			if (unlikely(!pgtable))
-				return VM_FAULT_OOM;
-			zero_page = get_huge_zero_page();
-			if (unlikely(!zero_page)) {
-				pte_free(mm, pgtable);
-				count_vm_event(THP_FAULT_FALLBACK);
-				goto out;
-			}
-			spin_lock(&mm->page_table_lock);
-			set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-					zero_page);
-			spin_unlock(&mm->page_table_lock);
-			if (!set) {
-				pte_free(mm, pgtable);
-				put_huge_zero_page();
-			}
-			return 0;
-		}
-		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page)) {
+		zero_page = get_huge_zero_page();
+		if (unlikely(!zero_page)) {
+			pte_free(mm, pgtable);
 			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
 		}
-		count_vm_event(THP_FAULT_ALLOC);
-		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
-			put_page(page);
-			goto out;
-		}
-		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
-							  page))) {
-			mem_cgroup_uncharge_page(page);
-			put_page(page);
-			goto out;
+		spin_lock(&mm->page_table_lock);
+		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+				zero_page);
+		spin_unlock(&mm->page_table_lock);
+		if (!set) {
+			pte_free(mm, pgtable);
+			put_huge_zero_page();
 		}
-
 		return 0;
 	}
+	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+			vma, haddr, numa_node_id(), 0);
+	if (unlikely(!page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
+		goto out;
+	}
+	count_vm_event(THP_FAULT_ALLOC);
+	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+		put_page(page);
+		goto out;
+	}
+	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		goto out;
+	}
+
+	return 0;
 out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
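
The hunk above is a pure restructuring: the old code wrapped the entire fault path in
`if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) { ... }`, while the
new code negates that bounds check (by De Morgan: `haddr < vma->vm_start ||
haddr + HPAGE_PMD_SIZE > vma->vm_end`) into an early `goto out`, so the zero-page path,
the huge-page allocation, and the fallback all lose one level of indentation. Below is a
minimal standalone sketch of the same guard-clause pattern; REGION_START, CHUNK_SIZE,
handle() and fallback() are hypothetical stand-ins, not kernel code.

#include <stdio.h>

#define REGION_START 0x1000UL
#define REGION_END   0x9000UL
#define CHUNK_SIZE   0x2000UL

static int fallback(void)
{
	puts("fallback path");
	return -1;
}

static int handle(unsigned long addr)
{
	printf("handled %#lx\n", addr);
	return 0;
}

static int do_chunk(unsigned long addr)
{
	/*
	 * Before: if (addr >= REGION_START && addr + CHUNK_SIZE <= REGION_END) { ... }
	 * After: bail out early on the negated check, keep the main path unindented.
	 */
	if (addr < REGION_START || addr + CHUNK_SIZE > REGION_END)
		goto out;
	return handle(addr);
out:
	return fallback();
}

int main(void)
{
	do_chunk(0x2000UL);	/* in range: handled */
	do_chunk(0x8800UL);	/* chunk would overrun REGION_END: fallback */
	return 0;
}

Note also the error-handling discipline visible on both sides of the diff: resources
acquired before taking mm->page_table_lock (the pgtable and the zero-page reference)
are released on every failure path via pte_free() and put_huge_zero_page() rather than
leaked, and the restructuring preserves that pairing unchanged.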