|
@@ -162,10 +162,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
|
|
|
return 0;
|
|
|
|
|
|
/*
|
|
|
- * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't
|
|
|
- * need to be executed for something that cannot be killed.
|
|
|
+ * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
|
|
|
+ * so the entire heuristic doesn't need to be executed for something
|
|
|
+ * that cannot be killed.
|
|
|
*/
|
|
|
- if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
|
|
|
+ if (atomic_read(&p->mm->oom_disable_count)) {
|
|
|
task_unlock(p);
|
|
|
return 0;
|
|
|
}
|
|
@@ -680,7 +681,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
|
|
|
read_lock(&tasklist_lock);
|
|
|
if (sysctl_oom_kill_allocating_task &&
|
|
|
!oom_unkillable_task(current, NULL, nodemask) &&
|
|
|
- (current->signal->oom_adj != OOM_DISABLE)) {
|
|
|
+ current->mm && !atomic_read(&current->mm->oom_disable_count)) {
|
|
|
/*
|
|
|
* oom_kill_process() needs tasklist_lock held. If it returns
|
|
|
* non-zero, current could not be killed so we must fallback to
|