@@ -389,7 +389,6 @@ struct kmem_cache {
 	unsigned int buffer_size;
 	u32 reciprocal_buffer_size;
 	/* 3) touched by every alloc & free from the backend */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
 
 	unsigned int flags;		/* constant flags */
 	unsigned int num;		/* # of objs per slab */
@@ -444,6 +443,17 @@ struct kmem_cache {
 	int obj_offset;
 	int obj_size;
 #endif
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
 };
 
 #define CFLGS_OFF_SLAB		(0x80000000UL)
@@ -678,9 +688,6 @@ static struct kmem_cache cache_cache = {
 	.shared = 1,
 	.buffer_size = sizeof(struct kmem_cache),
 	.name = "kmem_cache",
-#if DEBUG
-	.obj_size = sizeof(struct kmem_cache),
-#endif
 };
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
@@ -1440,6 +1447,15 @@ void __init kmem_cache_init(void)
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
+	/*
+	 * struct kmem_cache size depends on nr_node_ids, which
+	 * can be less than MAX_NUMNODES.
+	 */
+	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
+				 nr_node_ids * sizeof(struct kmem_list3 *);
+#if DEBUG
+	cache_cache.obj_size = cache_cache.buffer_size;
+#endif
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
 	cache_cache.reciprocal_buffer_size =