@@ -524,7 +524,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * < 0 if the cgroup is over its limit
  */
 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
-				gfp_t gfp_mask, enum charge_type ctype)
+				gfp_t gfp_mask, enum charge_type ctype,
+				struct mem_cgroup *memcg)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
@@ -569,16 +570,21 @@ retry:
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
-	if (!mm)
-		mm = &init_mm;
+	if (!memcg) {
+		if (!mm)
+			mm = &init_mm;
 
-	rcu_read_lock();
-	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	/*
-	 * For every charge from the cgroup, increment reference count
-	 */
-	css_get(&mem->css);
-	rcu_read_unlock();
+		rcu_read_lock();
+		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		/*
+		 * For every charge from the cgroup, increment reference count
+		 */
+		css_get(&mem->css);
+		rcu_read_unlock();
+	} else {
+		mem = memcg;
+		css_get(&memcg->css);
+	}
 
 	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
 		if (!(gfp_mask & __GFP_WAIT))
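
The extra memcg argument lets a caller charge a page against a group it has already resolved, skipping the RCU-protected mm->owner lookup; passing NULL keeps the old behaviour, which is what every existing caller does. A minimal caller sketch (hypothetical, modelled on how mem_cgroup_prepare_migration() further down uses it):

	/*
	 * Charge newpage to the group that pc's page is charged to.
	 * The group must be pinned with css_get() across the call;
	 * charge_common takes its own reference internally.
	 */
	struct mem_cgroup *mem = pc->mem_cgroup;

	css_get(&mem->css);
	ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
					MEM_CGROUP_CHARGE_TYPE_MAPPED, mem);
	css_put(&mem->css);
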
@@ -648,7 +654,7 @@ err:
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_MAPPED);
+				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
@@ -657,7 +663,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	if (!mm)
 		mm = &init_mm;
 	return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_CACHE);
+				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
+}
+
+int mem_cgroup_getref(struct page *page)
+{
+	struct page_cgroup *pc;
+
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	VM_BUG_ON(!pc);
+	pc->ref_cnt++;
+	unlock_page_cgroup(page);
+	return 0;
 }
 
 /*
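
Unlike mem_cgroup_charge(), the new mem_cgroup_getref() never allocates or charges anything: it only bumps the reference count of a page_cgroup that must already exist, which is what the VM_BUG_ON asserts. Each extra reference must be balanced by an uncharge, and the charge itself is only released when the count drops to zero. A hypothetical balanced pair (illustrative, not from this patch):

	mem_cgroup_getref(page);		/* ref_cnt: n -> n + 1 */
	/* ... second user of this page's charge ... */
	mem_cgroup_uncharge_page(page);		/* ref_cnt: n + 1 -> n */
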
@@ -707,65 +728,39 @@ unlock:
 }
 
 /*
- * Returns non-zero if a page (under migration) has valid page_cgroup member.
- * Refcnt of page_cgroup is incremented.
+ * Before starting migration, account against new page.
  */
-int mem_cgroup_prepare_migration(struct page *page)
+int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 {
 	struct page_cgroup *pc;
+	struct mem_cgroup *mem = NULL;
+	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+	int ret = 0;
 
 	if (mem_cgroup_subsys.disabled)
 		return 0;
 
 	lock_page_cgroup(page);
 	pc = page_get_page_cgroup(page);
-	if (pc)
-		pc->ref_cnt++;
+	if (pc) {
+		mem = pc->mem_cgroup;
+		css_get(&mem->css);
+		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
+			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	}
 	unlock_page_cgroup(page);
-	return pc != NULL;
-}
-
-void mem_cgroup_end_migration(struct page *page)
-{
-	mem_cgroup_uncharge_page(page);
+	if (mem) {
+		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
+						ctype, mem);
+		css_put(&mem->css);
+	}
+	return ret;
 }
 
-/*
- * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
- * And no race with uncharge() routines because page_cgroup for *page*
- * has extra one reference by mem_cgroup_prepare_migration.
- */
-void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+/* remove redundant charge */
+void mem_cgroup_end_migration(struct page *newpage)
 {
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
-
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	if (!pc) {
-		unlock_page_cgroup(page);
-		return;
-	}
-
-	mz = page_cgroup_zoneinfo(pc);
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(mz, pc);
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
-
-	page_assign_page_cgroup(page, NULL);
-	unlock_page_cgroup(page);
-
-	pc->page = newpage;
-	lock_page_cgroup(newpage);
-	page_assign_page_cgroup(newpage, pc);
-
-	mz = page_cgroup_zoneinfo(pc);
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(mz, pc);
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
-
-	unlock_page_cgroup(newpage);
+	mem_cgroup_uncharge_page(newpage);
 }
 
 /*
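
Taken together, these two functions replace the old scheme (hold a page_cgroup reference across migration, then move the page_cgroup with mem_cgroup_page_migration()) with a simpler one: charge the new page up front against the old page's group, preserving its cache/mapped charge type, and drop the now-redundant charge once migration is done. A sketch of the call sequence expected from the migration path (illustrative; mm/migrate.c is not part of this hunk, and the variable names are ours):

	int charge;

	charge = mem_cgroup_prepare_migration(page, newpage);
	if (charge)
		goto out;			/* newpage could not be charged */
	/* ... copy contents, move the mappings over ... */
	mem_cgroup_end_migration(newpage);	/* remove redundant charge */
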
@@ -795,12 +790,19 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 		page = pc->page;
 		get_page(page);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
-		mem_cgroup_uncharge_page(page);
-		put_page(page);
-		if (--count <= 0) {
-			count = FORCE_UNCHARGE_BATCH;
+		/*
+		 * Check if this page is on LRU. !LRU page can be found
+		 * if it's under page migration.
+		 */
+		if (PageLRU(page)) {
+			mem_cgroup_uncharge_page(page);
+			put_page(page);
+			if (--count <= 0) {
+				count = FORCE_UNCHARGE_BATCH;
+				cond_resched();
+			}
+		} else
 			cond_resched();
-		}
 		spin_lock_irqsave(&mz->lru_lock, flags);
 	}
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
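
The new PageLRU() test closes a race with the migration changes above: a page that has been isolated from the LRU may be having its charge handed over via prepare/end_migration, so force_empty must not uncharge it here; the loop yields and retries on a later pass instead. Schematically (an illustrative timeline pieced together from the hunks above, not code from this patch):

	/*
	 * CPU A (migration)                CPU B (force_empty)
	 *
	 * isolate page (PG_lru cleared)
	 * mem_cgroup_prepare_migration()   sees !PageLRU(page):
	 *   charges newpage                  skips it, cond_resched()
	 * move mappings, release old page
	 * mem_cgroup_end_migration()       a later pass finds the charge
	 *   removes redundant charge         on an LRU page and uncharges
	 */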