@@ -600,23 +600,22 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
-					 struct page_cgroup *pc,
-					 bool charge)
+					 bool file, int nr_pages)
 {
-	int val = (charge) ? 1 : -1;
-
 	preempt_disable();
 
-	if (PageCgroupCache(pc))
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
+	if (file)
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
 	else
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
 
-	if (charge)
+	/* pagein of a big page is an event. So, ignore page size */
+	if (nr_pages > 0)
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
 	else
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
-	__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
+
+	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
 
 	preempt_enable();
 }
@@ -2115,7 +2114,7 @@ static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
 		break;
 	}
 
-	mem_cgroup_charge_statistics(mem, pc, true);
+	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), 1);
 }
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
@@ -2186,14 +2185,14 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, pc, false);
+	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -1);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
 		mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, pc, true);
+	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), 1);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
@@ -2597,7 +2596,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	}
 
 	for (i = 0; i < count; i++)
-		mem_cgroup_charge_statistics(mem, pc + i, false);
+		mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -1);
 
 	ClearPageCgroupUsed(pc);
 	/*
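
The gist of the change: the statistics helper now takes a signed page delta (`nr_pages`) plus a `file` flag instead of a `struct page_cgroup *` and a charge/uncharge bool, so one call can account a whole huge page, and the pagein/pageout event is keyed off the sign. Below is a minimal userspace sketch of those semantics; it is not kernel code, the struct and helper names here are invented for illustration, and plain longs stand in for the per-cpu counters (so no preempt_disable()/preempt_enable() pair is needed).

/*
 * Userspace sketch of the reworked charge-statistics semantics.
 * All names below are hypothetical; only the logic mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

enum { STAT_CACHE, STAT_RSS, STAT_PGPGIN, STAT_PGPGOUT, STAT_EVENTS, NR_STATS };

struct stats {
	long count[NR_STATS];
};

/* Signed delta: positive for charge, negative for uncharge. */
static void charge_statistics(struct stats *s, bool file, int nr_pages)
{
	if (file)
		s->count[STAT_CACHE] += nr_pages;
	else
		s->count[STAT_RSS] += nr_pages;

	/* pagein of a big page is one event; ignore the page size */
	if (nr_pages > 0)
		s->count[STAT_PGPGIN]++;
	else
		s->count[STAT_PGPGOUT]++;

	s->count[STAT_EVENTS] += nr_pages;
}

int main(void)
{
	struct stats s = { {0} };

	charge_statistics(&s, false, 512);	/* charge a 2MB THP as anon in one call */
	charge_statistics(&s, false, -1);	/* uncharge a single base page */

	printf("rss=%ld pgpgin=%ld pgpgout=%ld events=%ld\n",
	       s.count[STAT_RSS], s.count[STAT_PGPGIN],
	       s.count[STAT_PGPGOUT], s.count[STAT_EVENTS]);
	return 0;
}

Running this prints rss=511 pgpgin=1 pgpgout=1 events=511: the huge-page charge moves RSS by 512 pages but counts as a single pagein event, which is exactly why the bool parameter had to become a signed count.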