@@ -3754,22 +3754,14 @@ unlock:
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-                    unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                             unsigned long address, unsigned int flags)
 {
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
 
-        __set_current_state(TASK_RUNNING);
-
-        count_vm_event(PGFAULT);
-        mem_cgroup_count_vm_event(mm, PGFAULT);
-
-        /* do counter updates before entering really critical section. */
-        check_sync_rss_stat(current);
-
         if (unlikely(is_vm_hugetlb_page(vma)))
                 return hugetlb_fault(mm, vma, address, flags);
 
@@ -3850,6 +3842,34 @@ retry:
         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                    unsigned long address, unsigned int flags)
+{
+        int ret;
+
+        __set_current_state(TASK_RUNNING);
+
+        count_vm_event(PGFAULT);
+        mem_cgroup_count_vm_event(mm, PGFAULT);
+
+        /* do counter updates before entering really critical section. */
+        check_sync_rss_stat(current);
+
+        /*
+         * Enable the memcg OOM handling for faults triggered in user
+         * space. Kernel faults are handled more gracefully.
+         */
+        if (flags & FAULT_FLAG_USER)
+                mem_cgroup_enable_oom();
+
+        ret = __handle_mm_fault(mm, vma, address, flags);
+
+        if (flags & FAULT_FLAG_USER)
+                mem_cgroup_disable_oom();
+
+        return ret;
+}
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
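
For readers following along outside the kernel tree, below is a minimal standalone C sketch of the wrap-and-delegate shape this hunk introduces: the public entry point does the bookkeeping and arms OOM handling only for user-mode faults, while the core fault work stays in a static helper. All names in the sketch (fault_entry, do_fault_core, oom_enable/oom_disable, SKETCH_FAULT_FLAG_USER) are illustrative stand-ins, not the kernel's APIs.

/* oom_wrap_sketch.c — illustrative only; none of these names are kernel APIs. */
#include <stdio.h>

#define SKETCH_FAULT_FLAG_USER 0x01      /* stand-in for FAULT_FLAG_USER */

static int oom_enabled;                  /* models the "OOM handling armed" state */

static void oom_enable(void)  { oom_enabled = 1; }
static void oom_disable(void) { oom_enabled = 0; }

/* Core fault work: knows nothing about counters or OOM arming. */
static int do_fault_core(unsigned long address)
{
        printf("handling fault at %#lx (oom handling %s)\n",
               address, oom_enabled ? "enabled" : "disabled");
        return 0;
}

/* Public entry point: bookkeeping, arm OOM for user faults, delegate, disarm. */
static int fault_entry(unsigned long address, unsigned int flags)
{
        int ret;

        if (flags & SKETCH_FAULT_FLAG_USER)
                oom_enable();

        ret = do_fault_core(address);

        if (flags & SKETCH_FAULT_FLAG_USER)
                oom_disable();

        return ret;
}

int main(void)
{
        fault_entry(0x1000, SKETCH_FAULT_FLAG_USER); /* user fault: OOM armed */
        fault_entry(0x2000, 0);                      /* kernel fault: left alone */
        return 0;
}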