@@ -2258,7 +2258,8 @@ enum {
 };
 
 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
-				unsigned int nr_pages, bool oom_check)
+				unsigned int nr_pages, unsigned int min_pages,
+				bool oom_check)
 {
 	unsigned long csize = nr_pages * PAGE_SIZE;
 	struct mem_cgroup *mem_over_limit;
@@ -2281,18 +2282,18 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	} else
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
 	/*
-	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
-	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
-	 *
 	 * Never reclaim on behalf of optional batching, retry with a
 	 * single page instead.
 	 */
-	if (nr_pages == CHARGE_BATCH)
+	if (nr_pages > min_pages)
 		return CHARGE_RETRY;
 
 	if (!(gfp_mask & __GFP_WAIT))
 		return CHARGE_WOULDBLOCK;
 
+	if (gfp_mask & __GFP_NORETRY)
+		return CHARGE_NOMEM;
+
 	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
 		return CHARGE_RETRY;
@@ -2305,7 +2306,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * unlikely to succeed so close to the limit, and we fall back
 	 * to regular pages anyway in case of failure.
 	 */
-	if (nr_pages == 1 && ret)
+	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
 		return CHARGE_RETRY;
 
 	/*
@@ -2439,7 +2440,8 @@ again:
 		nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	}
 
-	ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
+	ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
+				   oom_check);
 	switch (ret) {
 	case CHARGE_OK:
 		break;
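
To make the changed retry policy easier to follow, here is a minimal user-space sketch of the decision flow the patched mem_cgroup_do_charge() implements. It is an illustration, not kernel code: the CHARGE_* result names, the min_pages parameter, and PAGE_ALLOC_COSTLY_ORDER mirror the patch above, but the SKETCH_GFP_* flag bits, the explicit margin argument, and reclaim_stub() are simplifications invented for this example.

/*
 * Minimal user-space sketch (not kernel code) of the charge retry
 * policy after this patch.  The flag bits, the margin argument and
 * reclaim_stub() are stand-ins for the real kernel machinery.
 */
#include <stdio.h>

#define SKETCH_GFP_WAIT		(1 << 0)	/* charger may sleep and reclaim */
#define SKETCH_GFP_NORETRY	(1 << 1)	/* opportunistic, fail fast */
#define PAGE_ALLOC_COSTLY_ORDER	3		/* same value as the kernel's */

enum charge_result {
	CHARGE_OK,		/* charge fit under the limit */
	CHARGE_RETRY,		/* worth retrying, possibly with fewer pages */
	CHARGE_WOULDBLOCK,	/* atomic context, cannot reclaim */
	CHARGE_NOMEM,		/* give up on this charge */
};

/* Stand-in for mem_cgroup_reclaim(); pretend some pages were freed. */
static unsigned int reclaim_stub(void)
{
	return 4;
}

static enum charge_result charge_decision(unsigned int nr_pages,
					  unsigned int min_pages,
					  unsigned int margin, int gfp)
{
	unsigned int progress;

	if (nr_pages <= margin)
		return CHARGE_OK;

	/* Never reclaim on behalf of optional batching; retry with
	 * min_pages, the size the caller actually needs. */
	if (nr_pages > min_pages)
		return CHARGE_RETRY;

	if (!(gfp & SKETCH_GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	/* New in this patch: __GFP_NORETRY charges fail fast instead
	 * of reclaiming or pushing toward the OOM path. */
	if (gfp & SKETCH_GFP_NORETRY)
		return CHARGE_NOMEM;

	progress = reclaim_stub();
	if (margin + progress >= nr_pages)
		return CHARGE_RETRY;

	/* A costly charge (e.g. a 512-page THP) is unlikely to succeed
	 * this close to the limit and the caller falls back to regular
	 * pages anyway, so only small charges retry on mere progress. */
	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && progress)
		return CHARGE_RETRY;

	return CHARGE_NOMEM;
}

int main(void)
{
	/* A THP charge with __GFP_NORETRY bails out early ... */
	printf("THP, NORETRY: %d\n", charge_decision(512, 512, 0,
	       SKETCH_GFP_WAIT | SKETCH_GFP_NORETRY));
	/* ... while a 32-page batch for a 1-page request retries unbatched. */
	printf("batched single page: %d\n", charge_decision(32, 1, 0,
	       SKETCH_GFP_WAIT));
	return 0;
}

Note the ordering the sketch preserves: the batch fallback (nr_pages > min_pages) is checked before any sleeping or reclaim, so optional batching on its own can never trigger reclaim; only once the request has shrunk to what the caller actually needs does the charger consider reclaiming, failing fast, or retrying.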