@@ -956,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	struct task_struct *tsk;
 	unsigned long address;
 	struct mm_struct *mm;
-	int write;
 	int fault;
+	int write = error_code & PF_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+					(write ? FAULT_FLAG_WRITE : 0);
 
 	tsk = current;
 	mm = tsk->mm;
@@ -1068,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			bad_area_nosemaphore(regs, error_code, address);
 			return;
 		}
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -1111,8 +1114,6 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * we can handle it..
 	 */
 good_area:
-	write = error_code & PF_WRITE;
-
 	if (unlikely(access_error(error_code, write, vma))) {
 		bad_area_access_error(regs, error_code, address);
 		return;
@@ -1123,21 +1124,34 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
 		return;
 	}
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-			      regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-			      regs, address);
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				      regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
 	}
 
 	check_v8086_mode(regs, address, tsk);
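
For reference, a minimal sketch (not part of the patch; the vma lookup and error paths are elided) of the retry pattern the final hunk implements: the first attempt runs with FAULT_FLAG_ALLOW_RETRY set, and if handle_mm_fault() returns VM_FAULT_RETRY after dropping mmap_sem to wait for I/O, the fault is retried exactly once with the flag cleared so it cannot loop indefinitely.

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
				(write ? FAULT_FLAG_WRITE : 0);
retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the vma covering 'address' ... */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY) {
		/*
		 * handle_mm_fault() already released mmap_sem while
		 * waiting for the page; retry once with
		 * FAULT_FLAG_ALLOW_RETRY cleared to avoid any risk of
		 * starvation.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	/* ... fault accounting, then the normal exit path ... */
	up_read(&mm->mmap_sem);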