@@ -152,7 +152,7 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 
 /* return true if the task is not adequate as candidate victim task. */
 static bool oom_unkillable_task(struct task_struct *p,
-		const struct mem_cgroup *mem, const nodemask_t *nodemask)
+		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	if (is_global_init(p))
 		return true;
@@ -160,7 +160,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 		return true;
 
 	/* When mem_cgroup_out_of_memory() and p is not member of the group */
-	if (mem && !task_in_mem_cgroup(p, mem))
+	if (memcg && !task_in_mem_cgroup(p, memcg))
 		return true;
 
 	/* p may not have freeable memory in nodemask */
@@ -179,12 +179,12 @@ static bool oom_unkillable_task(struct task_struct *p,
  * predictable as possible.  The goal is to return the highest value for the
  * task consuming the most memory to avoid subsequent oom failures.
  */
-unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 		      const nodemask_t *nodemask, unsigned long totalpages)
 {
 	long points;
 
-	if (oom_unkillable_task(p, mem, nodemask))
+	if (oom_unkillable_task(p, memcg, nodemask))
 		return 0;
 
 	p = find_lock_task_mm(p);
@@ -308,7 +308,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  * (not docbooked, we don't want this one cluttering up the manual)
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
-		unsigned long totalpages, struct mem_cgroup *mem,
+		unsigned long totalpages, struct mem_cgroup *memcg,
 		const nodemask_t *nodemask)
 {
 	struct task_struct *g, *p;
@@ -320,7 +320,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 
 		if (p->exit_state)
 			continue;
-		if (oom_unkillable_task(p, mem, nodemask))
+		if (oom_unkillable_task(p, memcg, nodemask))
 			continue;
 
 		/*
@@ -364,7 +364,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 			}
 		}
 
-		points = oom_badness(p, mem, nodemask, totalpages);
+		points = oom_badness(p, memcg, nodemask, totalpages);
 		if (points > *ppoints) {
 			chosen = p;
 			*ppoints = points;
@@ -387,14 +387,14 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
  *
  * Call with tasklist_lock read-locked.
  */
-static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
+static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	struct task_struct *p;
 	struct task_struct *task;
 
 	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
 	for_each_process(p) {
-		if (oom_unkillable_task(p, mem, nodemask))
+		if (oom_unkillable_task(p, memcg, nodemask))
 			continue;
 
 		task = find_lock_task_mm(p);
@@ -417,7 +417,7 @@ static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
 }
 
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-			struct mem_cgroup *mem, const nodemask_t *nodemask)
+			struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	task_lock(current);
 	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -427,10 +427,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 	cpuset_print_task_mems_allowed(current);
 	task_unlock(current);
 	dump_stack();
-	mem_cgroup_print_oom_info(mem, p);
+	mem_cgroup_print_oom_info(memcg, p);
 	show_mem(SHOW_MEM_FILTER_NODES);
 	if (sysctl_oom_dump_tasks)
-		dump_tasks(mem, nodemask);
+		dump_tasks(memcg, nodemask);
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -484,7 +484,7 @@ static int oom_kill_task(struct task_struct *p)
 
 static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			    unsigned int points, unsigned long totalpages,
-			    struct mem_cgroup *mem, nodemask_t *nodemask,
+			    struct mem_cgroup *memcg, nodemask_t *nodemask,
 			    const char *message)
 {
 	struct task_struct *victim = p;
@@ -493,7 +493,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	unsigned int victim_points = 0;
 
 	if (printk_ratelimit())
-		dump_header(p, gfp_mask, order, mem, nodemask);
+		dump_header(p, gfp_mask, order, memcg, nodemask);
 
 	/*
 	 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -524,7 +524,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			/*
 			 * oom_badness() returns 0 if the thread is unkillable
 			 */
-			child_points = oom_badness(child, mem, nodemask,
+			child_points = oom_badness(child, memcg, nodemask,
 								totalpages);
 			if (child_points > victim_points) {
 				victim = child;
@@ -561,7 +561,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
+void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
 {
 	unsigned long limit;
 	unsigned int points = 0;
@@ -578,14 +578,14 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	}
 
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
-	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
+	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
retry:
-	p = select_bad_process(&points, limit, mem, NULL);
+	p = select_bad_process(&points, limit, memcg, NULL);
 	if (!p || PTR_ERR(p) == -1UL)
 		goto out;
 
-	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
+	if (oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
 				"Memory cgroup out of memory"))
 		goto retry;
 out: