@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001 Paolo Alberelli
  * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2009 Paul Mundt
+ * Copyright (C) 2003 - 2012 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -95,6 +95,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	const struct exception_table_entry *fixup;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (writeaccess ? FAULT_FLAG_WRITE : 0));
 	pte_t *pte;
 	int fault;
 
@@ -124,6 +126,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+retry:
 	/* TLB misses upon some cache flushes get done under cli() */
 	down_read(&mm->mmap_sem);
 
@@ -188,7 +191,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -197,14 +204,27 @@ good_area:
 		BUG();
 	}
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-			      regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-			      regs, address);
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+				      regs, address);
+		}
+
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+			goto retry;
+		}
 	}
 
 	/* If we get here, the page fault has been handled. Do the TLB refill
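For reference, below is a minimal standalone sketch of the retry control flow these hunks introduce: the first call into the fault handler may return VM_FAULT_RETRY (after dropping mmap_sem), fault accounting only runs while FAULT_FLAG_ALLOW_RETRY is still set, and the flag is cleared before looping back so the fault is retried at most once. The flag values and fake_handle_mm_fault() here are illustrative stand-ins, not the kernel's definitions.

/*
 * Illustrative sketch only, not kernel code: the retry loop reduced to a
 * standalone userspace program with stubbed flag values and a fake fault
 * handler that asks for one retry before succeeding.
 */
#include <stdio.h>

#define FAULT_FLAG_WRITE	0x01	/* stand-in values for illustration */
#define FAULT_FLAG_ALLOW_RETRY	0x02
#define FAULT_FLAG_KILLABLE	0x04

#define VM_FAULT_MAJOR		0x01
#define VM_FAULT_RETRY		0x02
#define VM_FAULT_ERROR		0x04

/* Stub: pretend the first attempt has to drop the lock and retry. */
static int fake_handle_mm_fault(unsigned int flags)
{
	static int first_try = 1;

	if (first_try && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		first_try = 0;
		return VM_FAULT_RETRY;
	}
	return VM_FAULT_MAJOR;	/* resolved on the second pass */
}

int main(void)
{
	int writeaccess = 1;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (writeaccess ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	fault = fake_handle_mm_fault(flags);

	if (fault & VM_FAULT_ERROR) {
		fprintf(stderr, "fault error\n");
		return 1;
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* maj_flt/min_flt accounting and perf events go here */
		if (fault & VM_FAULT_RETRY) {
			/* only one retry: clear the flag before looping back */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			printf("handler asked for a retry, looping once\n");
			goto retry;
		}
	}

	printf("fault handled (flags=%#x)\n", flags);
	return 0;
}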