|
@@ -1123,10 +1123,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 */
 	chg = vma_needs_reservation(h, vma, addr);
 	if (chg < 0)
-		return ERR_PTR(-VM_FAULT_OOM);
+		return ERR_PTR(-ENOMEM);
 	if (chg)
 		if (hugepage_subpool_get_pages(spool, chg))
-			return ERR_PTR(-VM_FAULT_SIGBUS);
+			return ERR_PTR(-ENOSPC);
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
@@ -1136,7 +1136,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugepage_subpool_put_pages(spool, chg);
-			return ERR_PTR(-VM_FAULT_SIGBUS);
+			return ERR_PTR(-ENOSPC);
 		}
 	}
 
@@ -2496,6 +2496,7 @@ retry_avoidcopy:
 	new_page = alloc_huge_page(vma, address, outside_reserve);
 
 	if (IS_ERR(new_page)) {
+		long err = PTR_ERR(new_page);
 		page_cache_release(old_page);
 
 		/*
@@ -2524,7 +2525,10 @@ retry_avoidcopy:
 
 		/* Caller expects lock to be held */
 		spin_lock(&mm->page_table_lock);
-		return -PTR_ERR(new_page);
+		if (err == -ENOMEM)
+			return VM_FAULT_OOM;
+		else
+			return VM_FAULT_SIGBUS;
 	}
 
 	/*
@@ -2642,7 +2646,11 @@ retry:
 			goto out;
 		page = alloc_huge_page(vma, address, 0);
 		if (IS_ERR(page)) {
-			ret = -PTR_ERR(page);
+			ret = PTR_ERR(page);
+			if (ret == -ENOMEM)
+				ret = VM_FAULT_OOM;
+			else
+				ret = VM_FAULT_SIGBUS;
 			goto out;
 		}
 		clear_huge_page(page, address, pages_per_huge_page(h));