@@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
-static int kmem_size = sizeof(struct kmem_cache);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self,
 
 /*
  * Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
  */
 
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
-	list_add(&s->list, &slab_caches);
-	s->refcount = -1;
+	memcpy(s, static_cache, kmem_cache->object_size);
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 #endif
 		}
 	}
+	list_add(&s->list, &slab_caches);
+	return s;
 }
 
 void __init kmem_cache_init(void)
 {
+	static __initdata struct kmem_cache boot_kmem_cache,
+		boot_kmem_cache_node;
 	int i;
-	int caches = 0;
-	struct kmem_cache *temp_kmem_cache;
-	int order;
-	struct kmem_cache *temp_kmem_cache_node;
-	unsigned long kmalloc_size;
+	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
-	kmem_size = offsetof(struct kmem_cache, node) +
-				nr_node_ids * sizeof(struct kmem_cache_node *);
-
-	/* Allocate two kmem_caches from the page allocator */
-	kmalloc_size = ALIGN(kmem_size, cache_line_size());
-	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
+	kmem_cache_node = &boot_kmem_cache_node;
+	kmem_cache = &boot_kmem_cache;
 
-	/*
-	 * Must first have the slab cache available for the allocations of the
-	 * struct kmem_cache_node's. There is special bootstrap code in
-	 * kmem_cache_open for slab_state == DOWN.
-	 */
-	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
-
-	kmem_cache_node->name = "kmem_cache_node";
-	kmem_cache_node->size = kmem_cache_node->object_size =
-		sizeof(struct kmem_cache_node);
-	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache_node, "kmem_cache_node",
+		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	temp_kmem_cache = kmem_cache;
-	kmem_cache->name = "kmem_cache";
-	kmem_cache->size = kmem_cache->object_size = kmem_size;
-	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache, "kmem_cache",
+			offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *),
+			SLAB_HWCACHE_ALIGN);
 
-	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+	kmem_cache = bootstrap(&boot_kmem_cache);
 
 	/*
	 * Allocate kmem_cache_node properly from the kmem_cache slab.
	 * kmem_cache_node is separately allocated so no need to
	 * update any list pointers.
	 */
-	temp_kmem_cache_node = kmem_cache_node;
-
-	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
-	kmem_cache_bootstrap_fixup(kmem_cache_node);
-
-	caches++;
-	kmem_cache_bootstrap_fixup(kmem_cache);
-	caches++;
-	/* Free temporary boot structure */
-	free_pages((unsigned long)temp_kmem_cache, order);
+	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
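The chicken-and-egg problem this patch untangles is worth restating: kmem_cache descriptors are themselves allocated from the kmem_cache cache, which cannot exist before itself. The patch breaks the cycle with statically allocated boot structures that are later copied into properly allocated objects by bootstrap(). The userspace sketch below mimics that pattern; it is only an illustration under invented names (struct pool, pool_alloc, pool_of_pools are hypothetical stand-ins, not the kernel API), and the kernel's extra step of repointing page->slab back-pointers has no userspace analogue, so it is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct kmem_cache: a descriptor for one object pool. */
struct pool {
	const char *name;
	size_t object_size;
};

/* Like the global "kmem_cache": the pool that pool descriptors come from. */
static struct pool *pool_of_pools;

/* Stands in for kmem_cache_zalloc(): hand out one zeroed object. */
static void *pool_alloc(struct pool *p)
{
	return calloc(1, p->object_size);
}

/*
 * Like bootstrap() in the patch: once the allocator works, copy the
 * statically allocated descriptor into properly allocated memory and
 * return the permanent copy.
 */
static struct pool *bootstrap(struct pool *static_pool)
{
	struct pool *p = pool_alloc(pool_of_pools);

	memcpy(p, static_pool, pool_of_pools->object_size);
	return p;
}

int main(void)
{
	/* Static boot descriptor, playing the role of boot_kmem_cache. */
	static struct pool boot_pool = {
		.name = "pool", .object_size = sizeof(struct pool),
	};

	/* Phase 1: point the allocator at the static descriptor. */
	pool_of_pools = &boot_pool;

	/* Phase 2: the allocator can now allocate its own descriptor. */
	pool_of_pools = bootstrap(&boot_pool);

	printf("%s now lives on the heap at %p\n",
	       pool_of_pools->name, (void *)pool_of_pools);
	return 0;
}

After the second phase the static copy is dead weight, which is why the patch marks boot_kmem_cache and boot_kmem_cache_node __initdata: their memory can be reclaimed once init completes, replacing the old scheme of hand-freeing pages from the page allocator.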