@@ -862,15 +862,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 	return memcg;
 }
 
-static enum mem_cgroup_filter_t
-mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
-		mem_cgroup_iter_filter cond)
-{
-	if (!cond)
-		return VISIT;
-	return cond(memcg, root);
-}
-
 /*
  * Returns a next (in a pre-order walk) alive memcg (with elevated css
  * ref. count) or NULL if the whole root's subtree has been visited.
@@ -878,7 +869,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
  * helper function to be used by mem_cgroup_iter
  */
 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
-		struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
+		struct mem_cgroup *last_visited)
 {
 	struct cgroup_subsys_state *prev_css, *next_css;
 
@@ -896,31 +887,11 @@ skip_node:
 	if (next_css) {
 		struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
 
-		switch (mem_cgroup_filter(mem, root, cond)) {
-		case SKIP:
+		if (css_tryget(&mem->css))
+			return mem;
+		else {
 			prev_css = next_css;
 			goto skip_node;
-		case SKIP_TREE:
-			if (mem == root)
-				return NULL;
-			/*
-			 * css_rightmost_descendant is not an optimal way to
-			 * skip through a subtree (especially for imbalanced
-			 * trees leaning to right) but that's what we have right
-			 * now. More effective solution would be traversing
-			 * right-up for first non-NULL without calling
-			 * css_next_descendant_pre afterwards.
-			 */
-			prev_css = css_rightmost_descendant(next_css);
-			goto skip_node;
-		case VISIT:
-			if (css_tryget(&mem->css))
-				return mem;
-			else {
-				prev_css = next_css;
-				goto skip_node;
-			}
-			break;
 		}
 	}
 
@@ -984,7 +955,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * @root: hierarchy root
  * @prev: previously returned memcg, NULL on first invocation
  * @reclaim: cookie for shared reclaim walks, NULL for full walks
- * @cond: filter for visited nodes, NULL for no filter
  *
  * Returns references to children of the hierarchy below @root, or
  * @root itself, or %NULL after a full round-trip.
@@ -997,18 +967,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * divide up the memcgs in the hierarchy among all concurrent
  * reclaimers operating on the same zone and priority.
  */
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim,
-				   mem_cgroup_iter_filter cond)
+				   struct mem_cgroup_reclaim_cookie *reclaim)
 {
 	struct mem_cgroup *memcg = NULL;
 	struct mem_cgroup *last_visited = NULL;
 
-	if (mem_cgroup_disabled()) {
-		/* first call must return non-NULL, second return NULL */
-		return (struct mem_cgroup *)(unsigned long)!prev;
-	}
+	if (mem_cgroup_disabled())
+		return NULL;
 
 	if (!root)
 		root = root_mem_cgroup;
@@ -1019,9 +986,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
 	if (!root->use_hierarchy && root != root_mem_cgroup) {
 		if (prev)
 			goto out_css_put;
-		if (mem_cgroup_filter(root, root, cond) == VISIT)
-			return root;
-		return NULL;
+		return root;
 	}
 
 	rcu_read_lock();
@@ -1044,7 +1009,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
 			last_visited = mem_cgroup_iter_load(iter, root, &seq);
 		}
 
-		memcg = __mem_cgroup_iter_next(root, last_visited, cond);
+		memcg = __mem_cgroup_iter_next(root, last_visited);
 
 		if (reclaim) {
 			mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1055,11 +1020,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
 				reclaim->generation = iter->generation;
 		}
 
-		/*
-		 * We have finished the whole tree walk or no group has been
-		 * visited because filter told us to skip the root node.
-		 */
-		if (!memcg && (prev || (cond && !last_visited)))
+		if (prev && !memcg)
 			goto out_unlock;
 	}
 out_unlock:
@@ -1804,14 +1765,13 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  * a) it is over its soft limit
  * b) any parent up the hierarchy is over its soft limit
  */
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
 		struct mem_cgroup *root)
 {
 	struct mem_cgroup *parent = memcg;
 
 	if (res_counter_soft_limit_excess(&memcg->res))
-		return VISIT;
+		return true;
 
 	/*
	 * If any parent up to the root in the hierarchy is over its soft limit
@@ -1819,12 +1779,12 @@ mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
 	 */
 	while ((parent = parent_mem_cgroup(parent))) {
 		if (res_counter_soft_limit_excess(&parent->res))
-			return VISIT;
+			return true;
 		if (parent == root)
 			break;
 	}
 
-	return SKIP;
+	return false;
 }
 
 static DEFINE_SPINLOCK(memcg_oom_lock);
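
For context, the sketch below shows how a caller can walk the hierarchy with the restored three-argument mem_cgroup_iter() and apply the now-boolean mem_cgroup_soft_reclaim_eligible() in the loop body: the filtering that the removed cond callback used to do inside the iterator moves to the call site. This is an illustrative fragment, not part of the patch; example_soft_limit_walk() and its reclaim placeholder are hypothetical, while mem_cgroup_iter(), mem_cgroup_iter_break() and mem_cgroup_soft_reclaim_eligible() are the interfaces touched above.

/*
 * Illustrative only (not part of the patch): filter eligible groups
 * at the call site instead of inside the iterator.
 */
static void example_soft_limit_walk(struct mem_cgroup *root,
				    struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg;

	for (memcg = mem_cgroup_iter(root, NULL, reclaim);
	     memcg;
	     memcg = mem_cgroup_iter(root, memcg, reclaim)) {
		/* skip groups with no soft limit excess up to @root */
		if (!mem_cgroup_soft_reclaim_eligible(memcg, root))
			continue;

		/* ... reclaim pages charged to @memcg here ... */
	}
	/*
	 * A caller that breaks out of the loop early must drop the css
	 * reference the iterator holds on the last returned group, e.g.
	 * with mem_cgroup_iter_break(root, memcg).
	 */
}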