@@ -680,8 +680,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page))
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -909,11 +912,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1390,6 +1395,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(!PageSwapBacked(page));
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1790,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2151,8 +2159,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2192,8 +2203,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
@@ -2210,8 +2224,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();
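
Note (not part of the diff above): each count_vm_event() call added here bumps a per-CPU VM event counter that the core vmstat code sums and exports through /proc/vmstat. The event enum values (THP_FAULT_ALLOC and friends) are assumed to come from companion include/linux/vmstat.h and mm/vmstat.c changes that these hunks do not show, with the conventional lowercase names thp_fault_alloc, thp_fault_fallback, thp_collapse_alloc, thp_collapse_alloc_failed and thp_split. A minimal userspace sketch for reading the new counters back, under those assumptions:

/*
 * Sketch only, not part of this patch: print the THP event counters
 * from /proc/vmstat. Assumes the companion vmstat change exports them
 * with the thp_* names listed above.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	/* /proc/vmstat is "name value" per line; keep only thp_* entries. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "thp_", 4))
			fputs(line, stdout);
	fclose(f);
	return 0;
}

Comparing thp_fault_alloc against thp_fault_fallback (and likewise the collapse pair) gives a quick measure of how often huge page allocation succeeds versus falls back to 4k pages.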