@@ -187,10 +187,6 @@ struct mem_cgroup_per_node {
 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 };
 
-struct mem_cgroup_lru_info {
-	struct mem_cgroup_per_node *nodeinfo[0];
-};
-
 /*
  * Cgroups above their limits are maintained in a RB-Tree, independent of
  * their hierarchy representation
@@ -366,14 +362,8 @@ struct mem_cgroup {
 	atomic_t	numainfo_updating;
 #endif
 
-	/*
-	 * Per cgroup active and inactive list, similar to the
-	 * per zone LRU lists.
-	 *
-	 * WARNING: This has to be the last element of the struct. Don't
-	 * add new fields after this point.
-	 */
-	struct mem_cgroup_lru_info info;
+	struct mem_cgroup_per_node *nodeinfo[0];
+	/* WARNING: nodeinfo must be the last member here */
 };
 
 static size_t memcg_size(void)
@@ -683,7 +673,7 @@ static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
 {
 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
-	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
+	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
@@ -6087,13 +6077,13 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 		mz->on_tree = false;
 		mz->memcg = memcg;
 	}
-	memcg->info.nodeinfo[node] = pn;
+	memcg->nodeinfo[node] = pn;
 	return 0;
 }
 
 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
-	kfree(memcg->info.nodeinfo[node]);
+	kfree(memcg->nodeinfo[node]);
 }
 
 static struct mem_cgroup *mem_cgroup_alloc(void)
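
The struct change relies on the trailing zero-length array idiom: nodeinfo[0] contributes no size of its own, so the allocator has to reserve room for the per-node pointers immediately past the end of struct mem_cgroup, and any member placed after nodeinfo would overlap that over-allocated tail. Below is a minimal standalone sketch of the pattern; the names (example_cgroup, example_per_node, example_size, node_count) are illustrative stand-ins, not the actual memcontrol.c code, with node_count playing the role nr_node_ids plays in the kernel.

	#include <stdlib.h>
	#include <string.h>

	/* Illustrative stand-ins; not the real memcontrol.c types. */
	struct example_per_node {
		int dummy;
	};

	struct example_cgroup {
		int some_field;
		/* Must stay last: its storage lives in the over-allocated tail. */
		struct example_per_node *nodeinfo[0];
	};

	/* Runtime node count; the kernel uses nr_node_ids for this role. */
	static int node_count = 4;

	/* Size of the struct plus one trailing pointer slot per node. */
	static size_t example_size(void)
	{
		return sizeof(struct example_cgroup) +
		       node_count * sizeof(struct example_per_node *);
	}

	static struct example_cgroup *example_alloc(void)
	{
		struct example_cgroup *cg = malloc(example_size());

		if (!cg)
			return NULL;
		memset(cg, 0, example_size());
		return cg;
	}

	int main(void)
	{
		struct example_cgroup *cg = example_alloc();

		if (!cg)
			return 1;
		/* nodeinfo[0..node_count-1] index into the tail past the struct. */
		cg->nodeinfo[0] = malloc(sizeof(struct example_per_node));
		free(cg->nodeinfo[0]);
		free(cg);
		return 0;
	}

Any field added after nodeinfo would be clobbered by writes to those pointer slots, which is what the replacement WARNING comment in struct mem_cgroup guards against. (The [0] form is a GNU C extension; ISO C99 spells the same idea as a flexible array member, nodeinfo[].)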