@@ -81,6 +81,24 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 }
 #endif /* CONFIG_NUMA */
 
+/*
+ * If this is a system OOM (not a memcg OOM) and the task selected to be
+ * killed is not already running at high (RT) priorities, speed up the
+ * recovery by boosting the dying task to the lowest FIFO priority.
+ * That helps with the recovery and avoids interfering with RT tasks.
+ */
+static void boost_dying_task_prio(struct task_struct *p,
+				  struct mem_cgroup *mem)
+{
+	struct sched_param param = { .sched_priority = 1 };
+
+	if (mem)
+		return;
+
+	if (!rt_task(p))
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+}
+
 /*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
@@ -421,7 +439,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static int oom_kill_task(struct task_struct *p)
+static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
 	p = find_lock_task_mm(p);
 	if (!p) {
@@ -434,9 +452,17 @@ static int oom_kill_task(struct task_struct *p)
 		K(get_mm_counter(p->mm, MM_FILEPAGES)));
 	task_unlock(p);
 
-	p->rt.time_slice = HZ;
+
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
+
+	/*
+	 * We give our sacrificial lamb high priority and access to
+	 * all the memory it needs. That way it should be able to
+	 * exit() and clear out its resources quickly...
+	 */
+	boost_dying_task_prio(p, mem);
+
 	return 0;
 }
 #undef K
@@ -460,6 +486,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 */
 	if (p->flags & PF_EXITING) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
+		boost_dying_task_prio(p, mem);
 		return 0;
 	}
 
@@ -489,7 +516,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 		}
 	} while_each_thread(p, t);
 
-	return oom_kill_task(victim);
+	return oom_kill_task(victim, mem);
 }
 
 /*
@@ -670,6 +697,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
+		boost_dying_task_prio(current, NULL);
 		return;
 	}
 
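For readers less familiar with the scheduler API: SCHED_FIFO priority 1 is the
lowest realtime priority, so the boosted victim preempts all SCHED_NORMAL tasks
while still yielding to every existing RT task, which is why the patch skips
tasks that are already rt_task(). A minimal userspace sketch of the same call
(illustration only, not part of the patch; sched_setscheduler(2) is the
userspace counterpart of the in-kernel sched_setscheduler_nocheck() used above
and needs CAP_SYS_NICE to succeed):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Lowest realtime priority, mirroring the kernel-side boost. */
	struct sched_param param = { .sched_priority = 1 };

	if (sched_setscheduler(0, SCHED_FIFO, &param) != 0) {
		/* Typically fails with EPERM without CAP_SYS_NICE. */
		perror("sched_setscheduler");
		return 1;
	}
	printf("running as SCHED_FIFO, priority %d\n", param.sched_priority);
	return 0;
}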