@@ -1164,13 +1164,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	chg = vma_needs_reservation(h, vma, addr);
 	if (chg < 0)
 		return ERR_PTR(-ENOMEM);
-	if (chg)
-		if (hugepage_subpool_get_pages(spool, chg))
+	if (chg || avoid_reserve)
+		if (hugepage_subpool_get_pages(spool, 1))
 			return ERR_PTR(-ENOSPC);
 
 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 	if (ret) {
-		hugepage_subpool_put_pages(spool, chg);
+		if (chg || avoid_reserve)
+			hugepage_subpool_put_pages(spool, 1);
 		return ERR_PTR(-ENOSPC);
 	}
 	spin_lock(&hugetlb_lock);
@@ -1182,7 +1183,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			hugetlb_cgroup_uncharge_cgroup(idx,
 						       pages_per_huge_page(h),
 						       h_cg);
-			hugepage_subpool_put_pages(spool, chg);
+			if (chg || avoid_reserve)
+				hugepage_subpool_put_pages(spool, 1);
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
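
For illustration only, not part of the patch: a minimal userspace sketch of the accounting rule the hunks above introduce. subpool_get()/subpool_put() and the pool size are hypothetical stand-ins for hugepage_subpool_get_pages()/hugepage_subpool_put_pages(); the point is that the subpool is charged exactly one page whenever chg != 0 or avoid_reserve is set, and released under the same condition when a later step fails.

/*
 * Simplified, standalone model of the subpool accounting above -- not
 * kernel code.  subpool_get()/subpool_put() are hypothetical stand-ins
 * for hugepage_subpool_get_pages()/hugepage_subpool_put_pages(), and
 * the pool size is made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

static long subpool_free = 4;		/* hypothetical capacity, in pages */

static int subpool_get(long pages)
{
	if (subpool_free < pages)
		return -1;		/* -ENOSPC in the real code */
	subpool_free -= pages;
	return 0;
}

static void subpool_put(long pages)
{
	subpool_free += pages;
}

/*
 * Mirrors the patched condition: charge the subpool one page whenever a
 * new reservation is needed (chg != 0) or the caller bypasses the
 * existing reservation (avoid_reserve), and roll back symmetrically on
 * a later failure.
 */
static int alloc_path(long chg, bool avoid_reserve, bool later_failure)
{
	if (chg || avoid_reserve)
		if (subpool_get(1))
			return -1;

	if (later_failure) {		/* e.g. the cgroup charge failed */
		if (chg || avoid_reserve)
			subpool_put(1);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* An avoid_reserve fault now consumes a subpool page even when chg == 0. */
	printf("chg=0 avoid_reserve=1: ret=%d free=%ld\n",
	       alloc_path(0, true, false), subpool_free);
	printf("chg=1 avoid_reserve=0: ret=%d free=%ld\n",
	       alloc_path(1, false, false), subpool_free);
	printf("rollback on failure:   ret=%d free=%ld\n",
	       alloc_path(1, false, true), subpool_free);
	return 0;
}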