@@ -21,6 +21,7 @@
 #include <linux/memcontrol.h>
 #include <linux/cgroup.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/smp.h>
 #include <linux/page-flags.h>
 #include <linux/backing-dev.h>
@@ -139,6 +140,7 @@ enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
+	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 	NR_CHARGE_TYPE,
 };
 
@@ -780,6 +782,33 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
+#ifdef CONFIG_SWAP
+int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	int ret = 0;
+
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+	if (unlikely(!mm))
+		mm = &init_mm;
+	if (!locked)
+		lock_page(page);
+	/*
+	 * If not locked, the page can be dropped from SwapCache until
+	 * we reach here.
+	 */
+	if (PageSwapCache(page)) {
+		ret = mem_cgroup_charge_common(page, mm, mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+	}
+	if (!locked)
+		unlock_page(page);
+
+	return ret;
+}
+#endif
+
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
 	struct page_cgroup *pc;
@@ -817,6 +846,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	if (mem_cgroup_subsys.disabled)
 		return;
 
+	if (PageSwapCache(page))
+		return;
+
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -825,12 +857,26 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		return;
 
 	lock_page_cgroup(pc);
-	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
-	    || !PageCgroupUsed(pc)) {
-		/* This happens at race in zap_pte_range() and do_swap_page()*/
-		unlock_page_cgroup(pc);
-		return;
+
+	if (!PageCgroupUsed(pc))
+		goto unlock_out;
+
+	switch (ctype) {
+	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		if (page_mapped(page))
+			goto unlock_out;
+		break;
+	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
+		if (!PageAnon(page)) {	/* Shared memory */
+			if (page->mapping && !page_is_file_cache(page))
+				goto unlock_out;
+		} else if (page_mapped(page)) /* Anon */
+			goto unlock_out;
+		break;
+	default:
+		break;
 	}
+
 	ClearPageCgroupUsed(pc);
 	mem = pc->mem_cgroup;
 
@@ -844,6 +890,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	css_put(&mem->css);
 
 	return;
+
+unlock_out:
+	unlock_page_cgroup(pc);
+	return;
 }
 
 void mem_cgroup_uncharge_page(struct page *page)
@@ -863,6 +913,11 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+void mem_cgroup_uncharge_swapcache(struct page *page)
+{
+	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
+}
+
 /*
  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  * page belongs to.
@@ -920,7 +975,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
 	/* unused page is not on radix-tree now. */
-	if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED)
+	if (unused)
 		__mem_cgroup_uncharge_common(unused, ctype);
 
 	pc = lookup_page_cgroup(target);