@@ -154,7 +154,7 @@ struct mem_cgroup {
 
 	/*
 	 * While reclaiming in a hiearchy, we cache the last child we
-	 * reclaimed from. Protected by cgroup_lock()
+	 * reclaimed from. Protected by hierarchy_mutex
 	 */
 	struct mem_cgroup *last_scanned_child;
 	/*
@@ -615,7 +615,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
 /*
  * This routine finds the DFS walk successor. This routine should be
- * called with cgroup_mutex held
+ * called with hierarchy_mutex held
  */
 static struct mem_cgroup *
 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
@@ -685,7 +685,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
 	/*
 	 * Scan all children under the mem_cgroup mem
 	 */
-	cgroup_lock();
+	mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
 	if (list_empty(&root_mem->css.cgroup->children)) {
 		ret = root_mem;
 		goto done;
@@ -706,7 +706,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
 
 done:
 	root_mem->last_scanned_child = ret;
-	cgroup_unlock();
+	mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
 	return ret;
 }
 
@@ -770,18 +770,16 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	while (next_mem != root_mem) {
 		if (mem_cgroup_is_obsolete(next_mem)) {
 			mem_cgroup_put(next_mem);
-			cgroup_lock();
 			next_mem = mem_cgroup_get_first_node(root_mem);
-			cgroup_unlock();
 			continue;
 		}
 		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
 						get_swappiness(next_mem));
 		if (mem_cgroup_check_under_limit(root_mem))
 			return 0;
-		cgroup_lock();
+		mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
 		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
-		cgroup_unlock();
+		mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
 	}
 	return ret;
 }