@@ -42,6 +42,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+						 (write ? FAULT_FLAG_WRITE : 0);
 
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -91,6 +93,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -144,7 +147,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -153,12 +160,27 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-		tsk->maj_flt++;
-	} else {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-		tsk->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+						  regs, address);
+			tsk->maj_flt++;
+		} else {
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+						  regs, address);
+			tsk->min_flt++;
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 
 	up_read(&mm->mmap_sem);