@@ -303,6 +303,12 @@ struct kmem_list3 {
 	int free_touched;		/* updated without locking */
 };
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /*
  * Need this for bootstrapping a per node allocator.
  */
@@ -753,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -761,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1625,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -2102,7 +2117,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 			    kmalloc_node(sizeof(struct kmem_list3),
-					GFP_KERNEL, node);
+					gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
@@ -3354,6 +3369,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3451,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
|