@@ -231,7 +231,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -253,18 +253,7 @@ good_area:
 		goto out;
 	}
 
-	/*
-	 * If for any reason at all we couldn't handle the fault, make
-	 * sure we exit gracefully rather than endlessly redo the fault.
-	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
-	return fault;
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -279,6 +268,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	int write = fsr & FSR_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				(write ? FAULT_FLAG_WRITE : 0);
 
 	if (notify_page_fault(regs, fsr))
 		return 0;
@@ -305,6 +297,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -320,14 +313,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, tsk);
-	up_read(&mm->mmap_sem);
+	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (fault & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
-	else if (fault & VM_FAULT_MINOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					regs, addr);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					regs, addr);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
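
Reviewer note (not part of the patch): the retry protocol above is to take
the fault once with FAULT_FLAG_ALLOW_RETRY set; if the core VM returns
VM_FAULT_RETRY it has already dropped mmap_sem, so the handler re-takes the
lock and retries exactly once with the flag cleared, which bounds the loop
and avoids starvation. A minimal userspace sketch of that control flow
follows; the flag values and fake_handle_mm_fault() are illustrative
stand-ins, not the kernel's real definitions.

#include <stdio.h>

/* Stand-in bits mirroring the kernel's names; the values here are
 * illustrative only, not the kernel's actual definitions. */
#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_ALLOW_RETRY	0x02
#define FAULT_FLAG_KILLABLE	0x04

#define VM_FAULT_MAJOR		0x01
#define VM_FAULT_RETRY		0x02

/* Hypothetical stand-in for handle_mm_fault(): asks for a retry on the
 * first ALLOW_RETRY attempt (as if it had to drop the lock to wait on
 * page I/O), then succeeds on the second pass. */
static int fake_handle_mm_fault(unsigned int flags)
{
	static int calls;

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && calls++ == 0)
		return VM_FAULT_RETRY;
	return VM_FAULT_MAJOR;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     FAULT_FLAG_WRITE;
	int fault;

retry:
	fault = fake_handle_mm_fault(flags);
	if (fault & VM_FAULT_RETRY) {
		/* As in the patch: clear FAULT_FLAG_ALLOW_RETRY before
		 * looping, so at most one retry ever happens. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		printf("retrying without ALLOW_RETRY\n");
		goto retry;
	}
	printf("fault resolved (%s)\n",
	       (fault & VM_FAULT_MAJOR) ? "major" : "minor");
	return 0;
}

Clearing the flag before the goto is what makes the accounting hunk correct
as well: maj_flt/min_flt and the perf events are only bumped while
FAULT_FLAG_ALLOW_RETRY is still set, i.e. on the initial attempt.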