@@ -44,6 +44,7 @@ void do_page_fault(struct pt_regs *regs)
 
 	int is_write, is_exec;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	info.si_code = SEGV_MAPERR;
 
@@ -71,6 +72,7 @@ void do_page_fault(struct pt_regs *regs)
 	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 
@@ -93,6 +95,7 @@ good_area:
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else if (is_exec) {
 		if (!(vma->vm_flags & VM_EXEC))
 			goto bad_area;
@@ -104,7 +107,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -112,10 +119,22 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return;
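
For reference, a minimal stand-alone sketch (ordinary user-space C, not kernel code) of the retry pattern the hunks above introduce: the first attempt runs with FAULT_FLAG_ALLOW_RETRY set, and if the handler returns VM_FAULT_RETRY the caller clears that flag and repeats the fault once, so the second attempt cannot loop forever. The flag values and the handle_fault() helper below are made-up placeholders for illustration, not the definitions from include/linux/mm.h.

#include <stdio.h>

/* Placeholder flag values for illustration only; the real constants
 * live in include/linux/mm.h and differ from these. */
#define FAULT_FLAG_ALLOW_RETRY	0x01
#define FAULT_FLAG_KILLABLE	0x02
#define FAULT_FLAG_WRITE	0x04
#define VM_FAULT_RETRY		0x10

/* Stand-in fault handler: pretend the first attempt had to drop the
 * lock and wait for I/O, so it asks the caller to retry. */
static unsigned int handle_fault(unsigned int flags, int *calls)
{
	(*calls)++;
	if (flags & FAULT_FLAG_ALLOW_RETRY)
		return VM_FAULT_RETRY;
	return 0;	/* fault resolved on the second pass */
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	unsigned int fault;
	int calls = 0;

retry:
	fault = handle_fault(flags, &calls);
	if (fault & VM_FAULT_RETRY) {
		/* Only one retry: clearing the flag guarantees the
		 * second call cannot ask to retry again. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	printf("fault handled after %d call(s)\n", calls);	/* prints 2 */
	return 0;
}

The patched do_page_fault() follows the same shape: when handle_mm_fault() returns VM_FAULT_RETRY, __lock_page_or_retry() has already released mmap_sem, so the code only clears FAULT_FLAG_ALLOW_RETRY and jumps back to retry to retake the semaphore and redo the fault once.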