@@ -199,7 +199,8 @@ static struct page *alloc_fresh_huge_page_node(int nid)
 	struct page *page;
 
 	page = alloc_pages_node(nid,
-		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+						__GFP_REPEAT|__GFP_NOWARN,
 		HUGETLB_PAGE_ORDER);
 	if (page) {
 		if (arch_prepare_hugepage(page)) {
@@ -294,7 +295,8 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 	}
 	spin_unlock(&hugetlb_lock);
 
-	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
+	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+					__GFP_REPEAT|__GFP_NOWARN,
 		HUGETLB_PAGE_ORDER);
 
 	spin_lock(&hugetlb_lock);
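
For readers less familiar with the gfp flags involved, here is an illustrative, annotated sketch (not part of the patch) of what each flag in the combined mask from the first call site contributes. The per-flag meanings follow include/linux/gfp.h; the note about htlb_alloc_mask's default value is an assumption based on mainline kernels of this era, where it could also be switched to GFP_HIGHUSER_MOVABLE via the hugepages_treat_as_movable sysctl. The second call site builds the same mask minus __GFP_THISNODE, since alloc_pages() there may fall back to other nodes.

	/* Illustrative sketch only; not code added by this patch. */
	gfp_t mask = htlb_alloc_mask	/* base policy; GFP_HIGHUSER by default (assumption) */
		| __GFP_COMP		/* allocate a compound (multi-page, high-order) page */
		| __GFP_THISNODE	/* stay on the requested NUMA node, no fallback */
		| __GFP_REPEAT		/* retry the costly high-order allocation before failing */
		| __GFP_NOWARN;		/* suppress the allocation-failure warning on failure */

The point of the change is the added __GFP_REPEAT: huge page allocations are high-order and prone to transient failure under fragmentation, so asking the page allocator to retry harder before giving up improves the odds that pool growth and overcommit allocations succeed.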