@@ -101,6 +101,26 @@ static struct task_struct *find_lock_task_mm(struct task_struct *p)
 	return NULL;
 }
 
+/* return true if the task is not adequate as candidate victim task. */
+static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
+		const nodemask_t *nodemask)
+{
+	if (is_global_init(p))
+		return true;
+	if (p->flags & PF_KTHREAD)
+		return true;
+
+	/* When mem_cgroup_out_of_memory() and p is not member of the group */
+	if (mem && !task_in_mem_cgroup(p, mem))
+		return true;
+
+	/* p may not have freeable memory in nodemask */
+	if (!has_intersects_mems_allowed(p, nodemask))
+		return true;
+
+	return false;
+}
+
 /**
  * badness - calculate a numeric value for how bad this task has been
  * @p: task struct of which task we should calculate
@@ -295,12 +315,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
 	for_each_process(p) {
 		unsigned long points;
 
-		/* skip the init task and kthreads */
-		if (is_global_init(p) || (p->flags & PF_KTHREAD))
-			continue;
-		if (mem && !task_in_mem_cgroup(p, mem))
-			continue;
-		if (!has_intersects_mems_allowed(p, nodemask))
+		if (oom_unkillable_task(p, mem, nodemask))
 			continue;
 
 		/*
@@ -467,11 +482,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 
 			if (child->mm == p->mm)
 				continue;
-			if (child->flags & PF_KTHREAD)
-				continue;
-			if (mem && !task_in_mem_cgroup(child, mem))
-				continue;
-			if (!has_intersects_mems_allowed(child, nodemask))
+			if (oom_unkillable_task(child, mem, nodemask))
 				continue;
 
 			/* badness() returns 0 if the thread is unkillable */