@@ -160,22 +160,24 @@ static int start_khugepaged(void)
 	return err;
 }
 
-static int __init init_huge_zero_page(void)
+static int init_huge_zero_pfn(void)
 {
 	struct page *hpage;
+	unsigned long pfn;
 
 	hpage = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
 	if (!hpage)
 		return -ENOMEM;
-
-	huge_zero_pfn = page_to_pfn(hpage);
+	pfn = page_to_pfn(hpage);
+	if (cmpxchg(&huge_zero_pfn, 0, pfn))
+		__free_page(hpage);
 	return 0;
 }
 
 static inline bool is_huge_zero_pfn(unsigned long pfn)
 {
-	return pfn == huge_zero_pfn;
+	return huge_zero_pfn && pfn == huge_zero_pfn;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
@@ -564,10 +566,6 @@ static int __init hugepage_init(void)
 	if (err)
 		return err;
 
-	err = init_huge_zero_page();
-	if (err)
-		goto out;
-
 	err = khugepaged_slab_init();
 	if (err)
 		goto out;
@@ -590,8 +588,6 @@ static int __init hugepage_init(void)
 
 	return 0;
 out:
-	if (huge_zero_pfn)
-		__free_page(pfn_to_page(huge_zero_pfn));
 	hugepage_exit_sysfs(hugepage_kobj);
 	return err;
 }
@@ -735,6 +731,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		pgtable_t pgtable;
+		if (unlikely(!huge_zero_pfn && init_huge_zero_pfn())) {
+			count_vm_event(THP_FAULT_FALLBACK);
+			goto out;
+		}
 		pgtable = pte_alloc_one(mm, haddr);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
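
The cmpxchg() in init_huge_zero_pfn() is what makes the lazy allocation safe:
two threads can take the read fault concurrently, both allocate a candidate
huge page, but only the one whose cmpxchg() still sees huge_zero_pfn == 0
publishes its pfn; the loser frees its page and both proceed with the winner's.
Either way the function returns 0, since after a lost race the shared page
still exists; only a failed allocation is an error, which the fault path above
turns into a THP_FAULT_FALLBACK. As a minimal standalone userspace sketch of
that allocate-then-publish pattern (lazy_buf, init_lazy_buf and the 4096-byte
buffer are illustrative names, not kernel API; C11
atomic_compare_exchange_strong() stands in for the kernel's cmpxchg()):

/*
 * Sketch of the allocate-then-publish pattern used by init_huge_zero_pfn():
 * every racing thread allocates a candidate object, exactly one publishes
 * it, and the losers free their copy.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) lazy_buf;		/* NULL until first successful init */

static int init_lazy_buf(void)
{
	void *expected = NULL;
	void *candidate = calloc(1, 4096);	/* zeroed, like __GFP_ZERO */

	if (!candidate)
		return -1;			/* stands in for -ENOMEM */

	/* Publish the candidate only if nobody beat us to it. */
	if (!atomic_compare_exchange_strong(&lazy_buf, &expected, candidate))
		free(candidate);		/* lost the race: drop our copy */
	return 0;				/* shared object exists either way */
}

int main(void)
{
	/* Mirrors the fault path: initialize on first use, then reuse. */
	if (!atomic_load(&lazy_buf) && init_lazy_buf()) {
		fprintf(stderr, "allocation failed, fall back\n");
		return 1;
	}
	printf("shared zeroed buffer at %p\n", atomic_load(&lazy_buf));
	return 0;
}

Note that the same race awareness is why is_huge_zero_pfn() grows the
huge_zero_pfn && prefix in the hunk above: before the first successful
allocation the variable is still 0, and pfn 0 must not be mistaken for the
huge zero page.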