@@ -1894,6 +1894,20 @@ static struct page
 }
 #endif
 
+static bool hugepage_vma_check(struct vm_area_struct *vma)
+{
+	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vma->vm_flags & VM_NOHUGEPAGE))
+		return false;
+
+	if (!vma->anon_vma || vma->vm_ops)
+		return false;
+	if (is_vma_temporary_stack(vma))
+		return false;
+	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+	return true;
+}
+
 static void collapse_huge_page(struct mm_struct *mm,
 			       unsigned long address,
 			       struct page **hpage,
@@ -1934,17 +1948,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 		goto out;
-
-	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-	    (vma->vm_flags & VM_NOHUGEPAGE))
-		goto out;
-
-	if (!vma->anon_vma || vma->vm_ops)
-		goto out;
-	if (is_vma_temporary_stack(vma))
+	if (!hugepage_vma_check(vma))
 		goto out;
-	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
-
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd)
 		goto out;
@@ -2152,20 +2157,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			progress++;
 			break;
 		}
-
-		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
-		     !khugepaged_always()) ||
-		    (vma->vm_flags & VM_NOHUGEPAGE)) {
-		skip:
+		if (!hugepage_vma_check(vma)) {
+skip:
 			progress++;
 			continue;
 		}
-		if (!vma->anon_vma || vma->vm_ops)
-			goto skip;
-		if (is_vma_temporary_stack(vma))
-			goto skip;
-		VM_BUG_ON(vma->vm_flags & VM_NO_THP);
-
 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 		hend = vma->vm_end & HPAGE_PMD_MASK;
 		if (hstart >= hend)