@@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 	page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page) {
+	if (!page)
 		page = alloc_buddy_huge_page(h, nid);
-		if (page) {
-			spin_lock(&hugetlb_lock);
-			list_move(&page->lru, &h->hugepage_activelist);
-			spin_unlock(&hugetlb_lock);
-		}
-	}
 
 	return page;
 }
@@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	spin_unlock(&hugetlb_lock);
-
-	if (!page) {
+	if (page) {
+		/* update page cgroup details */
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
+		spin_unlock(&hugetlb_lock);
+	} else {
+		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_cgroup_uncharge_cgroup(idx,
@@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
 		spin_unlock(&hugetlb_lock);
 	}
@@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_private(page, (unsigned long)spool);
 
 	vma_commit_reservation(h, vma, addr);
-	/* update page cgroup details */
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	return page;
 }
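
The pattern the patch enforces: the page's hugetlb cgroup must be committed while hugetlb_lock is still held, i.e. before the unlock that makes the page visible on hugepage_activelist, so that anyone walking the active list under the lock never observes a page whose cgroup is still unassigned. Below is a minimal user-space sketch of that "commit state before publishing, under the same lock" idea; the struct, function names, and the pthread mutex are illustrative stand-ins, not the kernel API.

/*
 * Illustrative only: hpage, activate_page and walk_active are made-up
 * stand-ins for the kernel's page structures, allocation paths and
 * active-list walkers.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hpage {
	struct hpage *next;   /* hugepage_activelist linkage, simplified */
	void *cgroup;         /* owner, read by anyone walking the list */
};

static pthread_mutex_t hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;
static struct hpage *active_list;

/*
 * The buggy ordering the patch removes would publish the page on the
 * list, drop the lock, and only then assign pg->cgroup, leaving a
 * window in which a walker sees cgroup == NULL.  The fixed ordering
 * commits the charge before the unlock that makes the page visible.
 */
static void activate_page(struct hpage *pg, void *owner)
{
	pthread_mutex_lock(&hugetlb_lock);
	pg->cgroup = owner;       /* commit the charge under the lock... */
	pg->next = active_list;   /* ...then publish on the active list */
	active_list = pg;
	pthread_mutex_unlock(&hugetlb_lock);
}

/* A walker (e.g. cgroup offline) sees only fully initialized pages. */
static void walk_active(void)
{
	pthread_mutex_lock(&hugetlb_lock);
	for (struct hpage *pg = active_list; pg; pg = pg->next)
		printf("page %p owned by %p\n", (void *)pg, pg->cgroup);
	pthread_mutex_unlock(&hugetlb_lock);
}

int main(void)
{
	struct hpage *pg = calloc(1, sizeof(*pg));
	if (!pg)
		return 1;
	activate_page(pg, (void *)0x1);
	walk_active();
	free(pg);
	return 0;
}

The same reasoning explains both hunks of the patch: the dequeue path commits the charge before dropping hugetlb_lock, and the buddy-allocation path commits it under the same lock acquisition that moves the page onto hugepage_activelist.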