@@ -690,15 +690,19 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
-					 bool file, int nr_pages)
+					 bool anon, int nr_pages)
 {
 	preempt_disable();
 
-	if (file)
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+	/*
+	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
+	 * counted as CACHE even if it's on ANON LRU.
+	 */
+	if (anon)
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 				nr_pages);
 	else
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 				nr_pages);
 
 	/* pagein of a big page is an event. So, ignore page size */
@@ -2442,6 +2446,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
+	bool anon;
 
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
@@ -2477,19 +2482,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * See mem_cgroup_add_lru_list(), etc.
 	 */
 	smp_wmb();
-	switch (ctype) {
-	case MEM_CGROUP_CHARGE_TYPE_CACHE:
-	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
-		SetPageCgroupCache(pc);
-		SetPageCgroupUsed(pc);
-		break;
-	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
-		ClearPageCgroupCache(pc);
-		SetPageCgroupUsed(pc);
-		break;
-	default:
-		break;
-	}
+	SetPageCgroupUsed(pc);
 
 	if (lrucare) {
 		if (was_on_lru) {
@@ -2500,7 +2493,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+		anon = true;
+	else
+		anon = false;
+
+	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
 	unlock_page_cgroup(pc);
 
 	/*
@@ -2565,6 +2563,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
 	unsigned long flags;
 	int ret;
+	bool anon = PageAnon(page);
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON(PageLRU(page));
@@ -2593,14 +2592,14 @@ static int mem_cgroup_move_account(struct page *page,
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
+	mem_cgroup_charge_statistics(from, anon, -nr_pages);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
 		__mem_cgroup_cancel_charge(from, nr_pages);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
+	mem_cgroup_charge_statistics(to, anon, nr_pages);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
@@ -2921,6 +2920,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
 	struct page_cgroup *pc;
+	bool anon;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2946,8 +2946,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	if (!PageCgroupUsed(pc))
 		goto unlock_out;
 
+	anon = PageAnon(page);
+
 	switch (ctype) {
 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		anon = true;
+		/* fallthrough */
 	case MEM_CGROUP_CHARGE_TYPE_DROP:
 		/* See mem_cgroup_prepare_migration() */
 		if (page_mapped(page) || PageCgroupMigration(pc))
@@ -2964,7 +2968,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		break;
 	}
 
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
+	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -3271,6 +3275,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 {
 	struct page *used, *unused;
 	struct page_cgroup *pc;
+	bool anon;
 
 	if (!memcg)
 		return;
@@ -3292,8 +3297,10 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	lock_page_cgroup(pc);
 	ClearPageCgroupMigration(pc);
 	unlock_page_cgroup(pc);
-
-	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
+	anon = PageAnon(used);
+	__mem_cgroup_uncharge_common(unused,
+		anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
+		     : MEM_CGROUP_CHARGE_TYPE_CACHE);
 
 	/*
 	 * If a page is a file cache, radix-tree replacement is very atomic
@@ -3303,7 +3310,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	 * and USED bit check in mem_cgroup_uncharge_page() will do enough
 	 * check. (see prepare_charge() also)
 	 */
-	if (PageAnon(used))
+	if (anon)
 		mem_cgroup_uncharge_page(used);
 	/*
 	 * At migration, we may charge account against cgroup which has no
@@ -3333,7 +3340,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	/* fix accounting on old pages */
 	lock_page_cgroup(pc);
 	memcg = pc->mem_cgroup;
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+	mem_cgroup_charge_statistics(memcg, false, -1);
 	ClearPageCgroupUsed(pc);
 	unlock_page_cgroup(pc);
 