@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 }
 
 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -194,14 +194,7 @@ good_area:
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
-			(!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
 
 	tsk = current;
 	mm = tsk->mm;
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		if (!user_mode(regs)
 		    && !search_exception_tables(regs->UCreg_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_pf(mm, addr, fsr, tsk);
+	fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
 
 	/*