@@ -180,7 +180,7 @@ struct mem_cgroup_per_node {
 };
 
 struct mem_cgroup_lru_info {
-	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
+	struct mem_cgroup_per_node *nodeinfo[0];
 };
 
 /*
@@ -283,17 +283,6 @@ struct mem_cgroup {
 	 * the counter to account for kernel memory usage.
 	 */
 	struct res_counter kmem;
-	/*
-	 * Per cgroup active and inactive list, similar to the
-	 * per zone LRU lists.
-	 */
-	struct mem_cgroup_lru_info info;
-	int last_scanned_node;
-#if MAX_NUMNODES > 1
-	nodemask_t	scan_nodes;
-	atomic_t	numainfo_events;
-	atomic_t	numainfo_updating;
-#endif
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
 	 */
@@ -357,8 +346,29 @@ struct mem_cgroup {
 	/* Index in the kmem_cache->memcg_params->memcg_caches array */
 	int kmemcg_id;
 #endif
+
+	int last_scanned_node;
+#if MAX_NUMNODES > 1
+	nodemask_t	scan_nodes;
+	atomic_t	numainfo_events;
+	atomic_t	numainfo_updating;
+#endif
+	/*
+	 * Per cgroup active and inactive list, similar to the
+	 * per zone LRU lists.
+	 *
+	 * WARNING: This has to be the last element of the struct. Don't
+	 * add new fields after this point.
+	 */
+	struct mem_cgroup_lru_info info;
 };
 
+static size_t memcg_size(void)
+{
+	return sizeof(struct mem_cgroup) +
+		nr_node_ids * sizeof(struct mem_cgroup_per_node);
+}
+
 /* internal only representation about the status of kmem accounting. */
 enum {
 	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
@@ -635,6 +645,7 @@ static void drain_all_stock_async(struct mem_cgroup *memcg);
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
 {
+	VM_BUG_ON((unsigned)nid >= nr_node_ids);
 	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
 }
 
@@ -5925,9 +5936,9 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *memcg;
-	int size = sizeof(struct mem_cgroup);
+	size_t size = memcg_size();
 
-	/* Can be very big if MAX_NUMNODES is very big */
+	/* Can be very big if nr_node_ids is very big */
 	if (size < PAGE_SIZE)
 		memcg = kzalloc(size, GFP_KERNEL);
 	else
@@ -5964,7 +5975,7 @@ out_free:
 static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
 	int node;
-	int size = sizeof(struct mem_cgroup);
+	size_t size = memcg_size();
 
 	mem_cgroup_remove_from_trees(memcg);
 	free_css_id(&mem_cgroup_subsys, &memcg->css);
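
For readers who want to see the sizing trick outside the kernel tree, here is a minimal, self-contained userspace sketch of the pattern the patch adopts: a trailing zero-length array plus a runtime size helper, so the structure only pays for the nodes that actually exist rather than for MAX_NUMNODES. All names below (fake_memcg, fake_nr_node_ids, per_node, lru_info, fake_memcg_size) are illustrative stand-ins, not kernel symbols; the helper mirrors memcg_size() from the diff, and no claim is made about exact kernel behavior beyond the sizing idea.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel structures; not kernel code. */
struct per_node {
	long counters[4];		/* placeholder per-node state */
};

struct lru_info {
	struct per_node *nodeinfo[0];	/* zero-length array, as in the patch (GNU extension) */
};

struct fake_memcg {
	long some_state;		/* placeholder for the other fields */
	struct lru_info info;		/* must stay the last member */
};

static int fake_nr_node_ids = 4;	/* runtime node count, cf. nr_node_ids */

/* Mirrors memcg_size(): base struct plus per-node space for the trailing array. */
static size_t fake_memcg_size(void)
{
	return sizeof(struct fake_memcg) +
		fake_nr_node_ids * sizeof(struct per_node);
}

int main(void)
{
	struct fake_memcg *memcg = calloc(1, fake_memcg_size());
	int nid;

	if (!memcg)
		return 1;

	/* Per-node structures are still allocated separately, one per node. */
	for (nid = 0; nid < fake_nr_node_ids; nid++)
		memcg->info.nodeinfo[nid] = calloc(1, sizeof(struct per_node));

	printf("sizeof(struct fake_memcg) = %zu, allocated = %zu bytes\n",
	       sizeof(struct fake_memcg), fake_memcg_size());

	for (nid = 0; nid < fake_nr_node_ids; nid++)
		free(memcg->info.nodeinfo[nid]);
	free(memcg);
	return 0;
}

Because the pointer slots of the trailing array live past the end of the declared struct, anything placed after it would be overwritten; that is presumably why the patch adds the WARNING comment insisting that info remain the last field of struct mem_cgroup.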