@@ -702,6 +702,23 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
 	return swappiness;
 }
 
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+	int *val = data;
+	(*val)++;
+	return 0;
+}
+/*
+ * This function returns the number of memcgs under the hierarchy tree.
+ * It returns 1 (the self count) if there are no children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
+{
+	int num = 0;
+	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+	return num;
+}
+
 /*
  * Visit the first child (need not be the first child as per the ordering
  * of the cgroup list, since we track last_scanned_child) of @mem and use
@@ -750,9 +767,11 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
  *
  * We give up and return to the caller when we visit root_mem twice.
  * (other groups can be removed while we're walking....)
+ *
+ * If shrink==true, this returns immediately to avoid freeing too much.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-				   gfp_t gfp_mask, bool noswap)
+				   gfp_t gfp_mask, bool noswap, bool shrink)
 {
 	struct mem_cgroup *victim;
 	int ret, total = 0;
@@ -771,6 +790,13 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 		ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
 						get_swappiness(victim));
 		css_put(&victim->css);
+		/*
+		 * When shrinking usage, we can't tell here whether we should
+		 * stop or reclaim more; that depends on the caller.
+		 * last_scanned_child is enough to keep fairness in the tree.
+		 */
+		if (shrink)
+			return ret;
 		total += ret;
 		if (mem_cgroup_check_under_limit(root_mem))
 			return 1 + total;
@@ -856,7 +882,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			goto nomem;
 
 		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-							noswap);
+							noswap, false);
 		if (ret)
 			continue;
 
@@ -1489,7 +1515,8 @@ int mem_cgroup_shrink_usage(struct page *page,
 		return 0;
 
 	do {
-		progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+		progress = mem_cgroup_hierarchical_reclaim(mem,
+					gfp_mask, true, false);
 		progress += mem_cgroup_check_under_limit(mem);
 	} while (!progress && --retry);
 
@@ -1504,11 +1531,21 @@ static DEFINE_MUTEX(set_limit_mutex);
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 				unsigned long long val)
 {
-
-	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	int retry_count;
 	int progress;
 	u64 memswlimit;
 	int ret = 0;
+	int children = mem_cgroup_count_children(memcg);
+	u64 curusage, oldusage;
+
+	/*
+	 * To keep hierarchical_reclaim simple, how long we should retry
+	 * depends on the caller.  We set the retry count to be a function
+	 * of the number of children we have to visit in this loop.
+	 */
+	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 
 	while (retry_count) {
 		if (signal_pending(current)) {
@@ -1534,8 +1571,13 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;
 
 		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-						   false);
-		if (!progress) retry_count--;
+						   false, true);
+		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		/* Was the usage reduced? */
+		if (curusage >= oldusage)
+			retry_count--;
+		else
+			oldusage = curusage;
 	}
 
 	return ret;
@@ -1544,13 +1586,16 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 				unsigned long long val)
 {
-	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	int retry_count;
 	u64 memlimit, oldusage, curusage;
-	int ret;
+	int children = mem_cgroup_count_children(memcg);
+	int ret = -EBUSY;
 
 	if (!do_swap_account)
 		return -EINVAL;
-
+	/* see mem_cgroup_resize_limit */
+	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 	while (retry_count) {
 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -1574,11 +1619,13 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 		if (!ret)
 			break;
 
-		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		/* Was the usage reduced? */
 		if (curusage >= oldusage)
 			retry_count--;
+		else
+			oldusage = curusage;
 	}
 	return ret;
 }
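
For readers who want to see the retry policy in isolation, the sketch below is a minimal user-space C program, not kernel code. It reproduces the two ideas the patch combines: counting every group in the hierarchy with a callback walk (as mem_cgroup_count_children() does via mem_cgroup_walk_tree()), scaling the retry budget by that count, and consuming a retry only when usage fails to shrink. All names in it (walk_tree, count_cb, RECLAIM_RETRIES, the node layout) are illustrative stand-ins, not the real memcg API.

/*
 * Stand-alone sketch of the patch's retry policy. Assumed names only;
 * nothing here is the actual memcg interface.
 */
#include <stdio.h>

struct node {
	struct node *child;	/* eldest child */
	struct node *sibling;	/* next sibling */
};

/* Visit @root and every descendant, invoking @cb on each node. */
static void walk_tree(struct node *root, void *data,
		      void (*cb)(struct node *, void *))
{
	cb(root, data);
	for (struct node *c = root->child; c; c = c->sibling)
		walk_tree(c, data, cb);
}

static void count_cb(struct node *n, void *data)
{
	(*(int *)data)++;	/* same trick as mem_cgroup_count_children_cb */
}

int main(void)
{
	/* A root with two children; the walk counts the root itself too. */
	struct node c2 = { 0 }, c1 = { .sibling = &c2 };
	struct node root = { .child = &c1 };
	int children = 0;

	walk_tree(&root, &children, count_cb);

	/* Scale the retry budget by the number of groups to visit. */
	enum { RECLAIM_RETRIES = 5 };
	int retry = RECLAIM_RETRIES * children;
	unsigned long long usage = 300, oldusage = usage;

	while (retry) {
		usage -= 40;			/* pretend reclaim made progress */
		if (usage >= oldusage)
			retry--;		/* no progress: burn a retry */
		else
			oldusage = usage;	/* progress: keep the budget */
		if (usage <= 100)
			break;			/* under the new limit */
	}
	printf("groups=%d usage=%llu retries left=%d\n",
	       children, usage, retry);
	return 0;
}

Note the design point this makes visible: as long as each pass shrinks usage, the retry budget is never consumed, so a large hierarchy is not penalized; only passes that make no progress count against the children-scaled limit before the resize gives up with -EBUSY.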