@@ -54,13 +54,7 @@ int test_set_oom_score_adj(int new_val)
 
 	spin_lock_irq(&sighand->siglock);
 	old_val = current->signal->oom_score_adj;
-	if (new_val != old_val) {
-		if (new_val == OOM_SCORE_ADJ_MIN)
-			atomic_inc(&current->mm->oom_disable_count);
-		else if (old_val == OOM_SCORE_ADJ_MIN)
-			atomic_dec(&current->mm->oom_disable_count);
-		current->signal->oom_score_adj = new_val;
-	}
+	current->signal->oom_score_adj = new_val;
 	spin_unlock_irq(&sighand->siglock);
 
 	return old_val;
@@ -172,16 +166,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 	if (!p)
 		return 0;
 
-	/*
-	 * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
-	 * so the entire heuristic doesn't need to be executed for something
-	 * that cannot be killed.
-	 */
-	if (atomic_read(&p->mm->oom_disable_count)) {
-		task_unlock(p);
-		return 0;
-	}
-
 	/*
 	 * The memory controller may have a limit of 0 bytes, so avoid a divide
 	 * by zero, if necessary.
@@ -451,6 +435,9 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 	for_each_process(q)
 		if (q->mm == mm && !same_thread_group(q, p) &&
 		    !(q->flags & PF_KTHREAD)) {
+			if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+				continue;
+
 			task_lock(q);	/* Protect ->comm from prctl() */
 			pr_err("Kill process %d (%s) sharing same memory\n",
 				task_pid_nr(q), q->comm);
@@ -727,7 +714,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	read_lock(&tasklist_lock);
 	if (sysctl_oom_kill_allocating_task &&
 	    !oom_unkillable_task(current, NULL, nodemask) &&
-	    current->mm && !atomic_read(&current->mm->oom_disable_count)) {
+	    current->mm) {
 		/*
 		 * oom_kill_process() needs tasklist_lock held. If it returns
 		 * non-zero, current could not be killed so we must fallback to