@@ -918,7 +918,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	struct inode *inode;
 	unsigned int vm_flags;
 	int error;
-	int accountable = 1;
 	unsigned long reqprot = prot;
 
 	/*
@@ -1019,8 +1018,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 					return -EPERM;
 				vm_flags &= ~VM_MAYEXEC;
 			}
-			if (is_file_hugepages(file))
-				accountable = 0;
 
 			if (!file->f_op || !file->f_op->mmap)
 				return -ENODEV;
@@ -1053,8 +1050,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (error)
 		return error;
 
-	return mmap_region(file, addr, len, flags, vm_flags, pgoff,
-			   accountable);
+	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
 
@@ -1092,17 +1088,23 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 
 /*
  * We account for memory if it's a private writeable mapping,
- * and VM_NORESERVE wasn't set.
+ * not hugepages and VM_NORESERVE wasn't set.
  */
-static inline int accountable_mapping(unsigned int vm_flags)
+static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
 {
+	/*
+	 * hugetlb has its own accounting separate from the core VM.
+	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
+	 */
+	if (file && is_file_hugepages(file))
+		return 0;
+
 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			  unsigned long len, unsigned long flags,
-			  unsigned int vm_flags, unsigned long pgoff,
-			  int accountable)
+			  unsigned int vm_flags, unsigned long pgoff)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
@@ -1128,18 +1130,22 @@ munmap_back:
 
 	/*
 	 * Set 'VM_NORESERVE' if we should not account for the
-	 * memory use of this mapping. We only honor MAP_NORESERVE
-	 * if we're allowed to overcommit memory.
+	 * memory use of this mapping.
 	 */
-	if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
-		vm_flags |= VM_NORESERVE;
-	if (!accountable)
-		vm_flags |= VM_NORESERVE;
+	if ((flags & MAP_NORESERVE)) {
+		/* We honor MAP_NORESERVE if allowed to overcommit */
+		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+			vm_flags |= VM_NORESERVE;
+
+		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
+		if (file && is_file_hugepages(file))
+			vm_flags |= VM_NORESERVE;
+	}
 
 	/*
 	 * Private writable mapping: check memory availability
 	 */
-	if (accountable_mapping(vm_flags)) {
+	if (accountable_mapping(file, vm_flags)) {
 		charged = len >> PAGE_SHIFT;
 		if (security_vm_enough_memory(charged))
 			return -ENOMEM;