@@ -310,26 +310,13 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
 	if (!task->mm)
 		return OOM_SCAN_CONTINUE;
 
-	if (task->flags & PF_EXITING) {
+	if (task->flags & PF_EXITING && !force_kill) {
 		/*
-		 * If task is current and is in the process of releasing memory,
-		 * allow the "kill" to set TIF_MEMDIE, which will allow it to
-		 * access memory reserves.  Otherwise, it may stall forever.
-		 *
-		 * The iteration isn't broken here, however, in case other
-		 * threads are found to have already been oom killed.
+		 * If this task is not being ptraced on exit, then wait for it
+		 * to finish before killing some other task unnecessarily.
 		 */
-		if (task == current)
-			return OOM_SCAN_SELECT;
-		else if (!force_kill) {
-			/*
-			 * If this task is not being ptraced on exit, then wait
-			 * for it to finish before killing some other task
-			 * unnecessarily.
-			 */
-			if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
-				return OOM_SCAN_ABORT;
-		}
+		if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
+			return OOM_SCAN_ABORT;
 	}
 	return OOM_SCAN_OK;
 }
@@ -706,11 +693,11 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		return;
 
 	/*
-	 * If current has a pending SIGKILL, then automatically select it. The
-	 * goal is to allow it to allocate so that it may quickly exit and free
-	 * its memory.
+	 * If current has a pending SIGKILL or is exiting, then automatically
+	 * select it. The goal is to allow it to allocate so that it may
+	 * quickly exit and free its memory.
 	 */
-	if (fatal_signal_pending(current)) {
+	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
 		set_thread_flag(TIF_MEMDIE);
 		return;
 	}
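
For reference, the decision flow after both hunks can be summarized outside the kernel tree. The sketch below is a minimal userspace approximation, not kernel code: the stub types, flag bits, and helper names (task_stub, PF_EXITING_STUB, scan_thread, grant_reserves_to_current) are hypothetical stand-ins for the real PF_EXITING/TIF_MEMDIE machinery, and exist only to make the two behavioral changes visible: an exiting victim only pauses the scan when force_kill is not set, and a current task that is exiting or already fatally signalled is granted reserves immediately.

/* Minimal userspace sketch of the post-patch decision flow; the types,
 * flag bits, and helpers below are stand-ins, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

#define PF_EXITING_STUB    0x1  /* stand-in for PF_EXITING */
#define PT_TRACE_EXIT_STUB 0x2  /* stand-in for PT_TRACE_EXIT */

enum oom_scan_t { OOM_SCAN_OK, OOM_SCAN_CONTINUE, OOM_SCAN_ABORT };

struct task_stub {
	unsigned int flags;
	unsigned int ptrace;
	bool has_mm;
	bool fatal_signal_pending;
};

/* Mirrors the reworked oom_scan_process_thread(): an exiting task only
 * aborts the scan when we are not forcing a kill and it is not being
 * ptraced on exit. */
static enum oom_scan_t scan_thread(const struct task_stub *task, bool force_kill)
{
	if (!task->has_mm)
		return OOM_SCAN_CONTINUE;
	if (task->flags & PF_EXITING_STUB && !force_kill) {
		if (!(task->ptrace & PT_TRACE_EXIT_STUB))
			return OOM_SCAN_ABORT;
	}
	return OOM_SCAN_OK;
}

/* Mirrors the widened fast path in out_of_memory(): current gets access
 * to reserves (TIF_MEMDIE in the kernel) if it is dying for any reason. */
static bool grant_reserves_to_current(const struct task_stub *current_task)
{
	return current_task->fatal_signal_pending ||
	       (current_task->flags & PF_EXITING_STUB);
}

int main(void)
{
	struct task_stub exiting = { .flags = PF_EXITING_STUB, .has_mm = true };

	printf("scan (no force_kill): %d\n", scan_thread(&exiting, false)); /* OOM_SCAN_ABORT */
	printf("scan (force_kill):    %d\n", scan_thread(&exiting, true));  /* OOM_SCAN_OK */
	printf("grant reserves:       %d\n", grant_reserves_to_current(&exiting)); /* 1 */
	return 0;
}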