@@ -1389,7 +1389,8 @@ out:
 	return ret;
 }
 
-int hugepage_madvise(unsigned long *vm_flags, int advice)
+int hugepage_madvise(struct vm_area_struct *vma,
+		     unsigned long *vm_flags, int advice)
 {
 	switch (advice) {
 	case MADV_HUGEPAGE:
@@ -1404,6 +1405,13 @@ int hugepage_madvise(unsigned long *vm_flags, int advice)
 			return -EINVAL;
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;
+		/*
+		 * If the vma becomes good for khugepaged to scan,
+		 * register it here without waiting for a page fault
+		 * that may not happen any time soon.
+		 */
+		if (unlikely(khugepaged_enter_vma_merge(vma)))
+			return -ENOMEM;
 		break;
 	case MADV_NOHUGEPAGE:
 		/*
@@ -1417,6 +1425,11 @@ int hugepage_madvise(unsigned long *vm_flags, int advice)
 			return -EINVAL;
 		*vm_flags &= ~VM_HUGEPAGE;
 		*vm_flags |= VM_NOHUGEPAGE;
+		/*
+		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
+		 * this vma even if the mm is still registered in khugepaged
+		 * (it may have been registered before VM_NOHUGEPAGE was set).
+		 */
 		break;
 	}
 
@@ -1784,7 +1797,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 		goto out;
 
-	if (!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always())
+	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vma->vm_flags & VM_NOHUGEPAGE))
 		goto out;
 
 	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
@@ -2007,8 +2021,9 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			break;
 		}
 
-		if (!(vma->vm_flags & VM_HUGEPAGE) &&
-		    !khugepaged_always()) {
+		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
+		     !khugepaged_always()) ||
+		    (vma->vm_flags & VM_NOHUGEPAGE)) {
 			progress++;
 			continue;
 		}
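
For context, here is a minimal userspace sketch (not part of the patch) of how
the advice handled above is requested.  The fallback #define values match the
asm-generic mman-common.h definitions, and the 8 MiB mapping size is an
arbitrary choice, just large enough to span at least one PMD-sized (2 MiB)
region; treat both as illustrative assumptions rather than requirements.

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE	14
#endif
#ifndef MADV_NOHUGEPAGE
#define MADV_NOHUGEPAGE	15
#endif

int main(void)
{
	size_t len = 8UL << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Opt out: sets VM_NOHUGEPAGE, so both the huge page fault path
	 * and khugepaged (per the checks patched above) skip this range.
	 */
	if (madvise(p, len, MADV_NOHUGEPAGE))
		perror("madvise(MADV_NOHUGEPAGE)");

	/*
	 * Opt back in: sets VM_HUGEPAGE and, with this patch, registers
	 * the mm with khugepaged immediately via
	 * khugepaged_enter_vma_merge() instead of waiting for a fault.
	 */
	if (madvise(p, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");

	munmap(p, len);
	return 0;
}

Before this change, a caller had to touch the MADV_HUGEPAGE range and take a
page fault before khugepaged would learn about the mm; registering in
hugepage_madvise() itself removes that dependency on a fault that may never
come.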