@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -767,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
 	 */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;