Browse Source

thp: madvise(MADV_HUGEPAGE)

Add madvise(MADV_HUGEPAGE) to mark regions that should be backed by
hugepages.  Return -EINVAL if the vma is not anonymous, or if the
feature isn't built into the kernel.  Never silently return success.
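
A minimal userspace sketch (illustrative only, not part of this commit)
of how the new advice is intended to be called; it assumes the installed
headers define MADV_HUGEPAGE (value 14 in asm-generic/mman-common.h):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	#ifndef MADV_HUGEPAGE
	#define MADV_HUGEPAGE 14	/* asm-generic/mman-common.h */
	#endif

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;	/* one PMD-sized page on x86 */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/*
		 * Per the commit message: madvise() fails with EINVAL if
		 * the mapping is not anonymous or the kernel was built
		 * without CONFIG_TRANSPARENT_HUGEPAGE; success is never
		 * silently faked.
		 */
		if (madvise(p, len, MADV_HUGEPAGE))
			perror("madvise(MADV_HUGEPAGE)");

		memset(p, 0, len);	/* fault the region in */
		munmap(p, len);
		return 0;
	}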

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Andrea Arcangeli 14 years ago
parent
commit
0af4e98b6b
3 changed files with 30 additions and 0 deletions
  1. include/linux/huge_mm.h  + 6 - 0
  2. mm/huge_memory.c  + 16 - 0
  3. mm/madvise.c  + 8 - 0

+ 6 - 0
include/linux/huge_mm.h

@@ -97,6 +97,7 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #if HPAGE_PMD_ORDER > MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
+extern int hugepage_madvise(unsigned long *vm_flags);
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
@@ -113,6 +114,11 @@ static inline int split_huge_page(struct page *page)
 	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
+static inline int hugepage_madvise(unsigned long *vm_flags)
+{
+	BUG();
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 #endif /* _LINUX_HUGE_MM_H */

+ 16 - 0
mm/huge_memory.c

@@ -896,6 +896,22 @@ out:
 	return ret;
 }

+int hugepage_madvise(unsigned long *vm_flags)
+{
+	/*
+	 * Be somewhat over-protective like KSM for now!
+	 */
+	if (*vm_flags & (VM_HUGEPAGE | VM_SHARED  | VM_MAYSHARE   |
+			 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+			 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+			 VM_MIXEDMAP | VM_SAO))
+		return -EINVAL;
+
+	*vm_flags |= VM_HUGEPAGE;
+
+	return 0;
+}
+
 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 {
 	struct page *page;

+ 8 - 0
mm/madvise.c

@@ -71,6 +71,11 @@ static long madvise_behavior(struct vm_area_struct * vma,
 		if (error)
 			goto out;
 		break;
+	case MADV_HUGEPAGE:
+		error = hugepage_madvise(&new_flags);
+		if (error)
+			goto out;
+		break;
 	}

 	if (new_flags == vma->vm_flags) {
@@ -282,6 +287,9 @@ madvise_behavior_valid(int behavior)
 #ifdef CONFIG_KSM
 	case MADV_MERGEABLE:
 	case MADV_UNMERGEABLE:
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	case MADV_HUGEPAGE:
 #endif
 		return 1;