@@ -1166,12 +1166,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	if (page) {
-		/* update page cgroup details */
-		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-					     h_cg, page);
-		spin_unlock(&hugetlb_lock);
-	} else {
+	if (!page) {
 		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
@@ -1182,11 +1177,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
-		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
-		spin_unlock(&hugetlb_lock);
+		/* Fall through */
 	}
+	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+	spin_unlock(&hugetlb_lock);

 	set_page_private(page, (unsigned long)spool);
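
For review convenience, below is a sketch of how this part of alloc_huge_page() reads once both hunks are applied. It is stitched together only from the context and '+' lines above; the surrounding declarations (h, h_cg, idx, spool) and the error-unwinding path belong to the rest of the function and are not reproduced here, so this is not a standalone compilable unit.

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (!page) {
		/* Dequeue from the pool failed: drop the lock and fall back
		 * to the buddy allocator. */
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			/* cleanup elided */
			return ERR_PTR(-ENOSPC);
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	/*
	 * Single commit point: the cgroup charge is committed while the page
	 * is on the active list and hugetlb_lock is held, in both the pool
	 * and the buddy-allocated case.
	 */
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

The effect of the change is that the charge commit is no longer duplicated in the two branches; both paths converge on one commit call made under hugetlb_lock after the page is guaranteed to be on the active list.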