@@ -1424,8 +1424,7 @@ static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-			gfp_t gfp_mask, struct mem_cgroup **memcg,
-			bool oom, struct page *page)
+		gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
 {
 	struct mem_cgroup *mem, *mem_over_limit;
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
@@ -1463,7 +1462,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		unsigned long flags = 0;
 
 		if (consume_stock(mem))
-			goto charged;
+			goto done;
 
 		ret = res_counter_charge(&mem->res, csize, &fail_res);
 		if (likely(!ret)) {
@@ -1558,16 +1557,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	}
 	if (csize > PAGE_SIZE)
 		refill_stock(mem, csize - PAGE_SIZE);
-charged:
-	/*
-	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
-	 * if they exceeds softlimit.
-	 */
-	if (page && mem_cgroup_soft_limit_check(mem))
-		mem_cgroup_update_tree(mem, page);
 done:
-	if (mem_cgroup_threshold_check(mem))
-		mem_cgroup_threshold(mem);
 	return 0;
 nomem:
 	css_put(&mem->css);
@@ -1691,6 +1681,16 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	mem_cgroup_charge_statistics(mem, pc, true);
 
 	unlock_page_cgroup(pc);
+	/*
+	 * "charge_statistics" updated event counter. Then, check it.
+	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+	 * if they exceeds softlimit.
+	 */
+	if (mem_cgroup_soft_limit_check(mem))
+		mem_cgroup_update_tree(mem, pc->page);
+	if (mem_cgroup_threshold_check(mem))
+		mem_cgroup_threshold(mem);
+
 }
 
 /**
@@ -1788,7 +1788,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 		goto put;
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
 	if (ret || !parent)
 		goto put_back;
 
@@ -1824,7 +1824,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	prefetchw(pc);
 
 	mem = memcg;
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
 	if (ret || !mem)
 		return ret;
 
@@ -1944,14 +1944,14 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
 	/* drop extra refcnt from tryget */
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
 static void
@@ -2340,8 +2340,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	unlock_page_cgroup(pc);
 
 	if (mem) {
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
-						page);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -3872,8 +3871,7 @@ one_by_one:
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem,
-						false, NULL);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
 		if (ret || !mem)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return -ENOMEM;