@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 		kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+	struct resv_map *reservations = vma_resv_map(vma);
+
+	if (!reservations)
+		return;
+	kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
 	struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 		reserve = (end - start) -
 			region_count(&reservations->regions, start, end);
 
-		kref_put(&reservations->refs, resv_map_release);
+		resv_map_put(vma);
 
 		if (reserve) {
 			hugetlb_acct_memory(h, -reserve);
@@ -2991,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
-		return chg;
+	if (chg < 0) {
+		ret = chg;
+		goto out_err;
+	}
 
 	/* There must be enough pages in the subpool for the mapping */
-	if (hugepage_subpool_get_pages(spool, chg))
-		return -ENOSPC;
+	if (hugepage_subpool_get_pages(spool, chg)) {
+		ret = -ENOSPC;
+		goto out_err;
+	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
@@ -3005,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugepage_subpool_put_pages(spool, chg);
-		return ret;
+		goto out_err;
 	}
 
 	/*
@@ -3022,6 +3035,9 @@ int hugetlb_reserve_pages(struct inode *inode,
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
+out_err:
+	resv_map_put(vma);
+	return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
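
For context, the pattern the patch converges on is the kernel's usual goto-based error unwinding: once the resv_map reference exists, every failure exit funnels through a single out_err label, so the reference is dropped exactly once instead of being leaked. The userspace sketch below is a hypothetical illustration of that shape, not kernel code; demo_map, demo_map_put() and demo_reserve() are invented stand-ins for resv_map, resv_map_put() and hugetlb_reserve_pages(), and the kref is modeled as a bare counter.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct resv_map: a refcounted object. */
struct demo_map {
	int refs;
};

static void demo_map_release(struct demo_map *map)
{
	printf("releasing map\n");
	free(map);
}

/*
 * Mirrors the shape of resv_map_put(): tolerate a missing map,
 * drop one reference, free on the last one.
 */
static void demo_map_put(struct demo_map *map)
{
	if (!map)
		return;
	if (--map->refs == 0)
		demo_map_release(map);
}

/*
 * Mirrors the fixed control flow of hugetlb_reserve_pages():
 * every failure after the reference is taken jumps to out_err,
 * so exactly one put happens on every error path.
 */
static int demo_reserve(long chg, int subpool_ok)
{
	struct demo_map *map = malloc(sizeof(*map));
	int ret;

	if (!map)
		return -1;
	map->refs = 1;

	if (chg < 0) {
		ret = (int)chg;
		goto out_err;
	}

	if (!subpool_ok) {
		ret = -2;	/* stands in for -ENOSPC */
		goto out_err;
	}

	/*
	 * Success: in the kernel the reference lives on in the VMA.
	 * This demo has no VMA, so drop it here to avoid leaking.
	 */
	demo_map_put(map);
	return 0;

out_err:
	demo_map_put(map);
	return ret;
}

int main(void)
{
	printf("ok path:  %d\n", demo_reserve(10, 1));
	printf("err path: %d\n", demo_reserve(-5, 1));
	return 0;
}

Running the sketch prints one release per call on both paths; in the real function the success-path reference instead stays with the VMA, and only the error paths needed the new resv_map_put().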