@@ -434,25 +434,6 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 	return (get_vma_private_data(vma) & flag) != 0;
 }
 
-/* Decrement the reserved pages in the hugepage pool by one */
-static void decrement_hugepage_resv_vma(struct hstate *h,
-			struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_NORESERVE)
-		return;
-
-	if (vma->vm_flags & VM_MAYSHARE) {
-		/* Shared mappings always use reserves */
-		h->resv_huge_pages--;
-	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		/*
-		 * Only the process that called mmap() has reserves for
-		 * private mappings.
-		 */
-		h->resv_huge_pages--;
-	}
-}
-
 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
@@ -466,10 +447,18 @@ static int vma_has_reserves(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_NORESERVE)
 		return 0;
+
+	/* Shared mappings always use reserves */
 	if (vma->vm_flags & VM_MAYSHARE)
 		return 1;
+
+	/*
+	 * Only the process that called mmap() has reserves for
+	 * private mappings.
+	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		return 1;
+
 	return 0;
 }
 
@@ -564,8 +553,8 @@ retry_cpuset:
 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 			if (page) {
-				if (!avoid_reserve)
-					decrement_hugepage_resv_vma(h, vma);
+				if (!avoid_reserve && vma_has_reserves(vma))
+					h->resv_huge_pages--;
 				break;
 			}
 		}