@@ -570,9 +570,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-        .nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+        .nodelists = kmem_cache_nodelists,
         .batchcount = 1,
         .limit = BOOT_CPUCACHE_ENTRIES,
         .shared = 1,
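Note on the hunk above: the rename is not cosmetic. The descriptor for the cache
that holds all struct kmem_cache objects cannot itself come from a slab allocation
(no cache exists yet when it is first needed), so it stays a static object, and the
common slab code introduced by this series reaches it through a shared pointer,
assumed here to be declared in mm/slab.h as in the rest of the series. A minimal
sketch of the pattern, under that assumption:

        /* mm/slab.h (assumed shared declaration for slab/slub/slob): */
        extern struct kmem_cache *kmem_cache;

        /* mm/slab.c: one static boot instance breaks the chicken-and-egg;
         * kmem_cache_init() points the shared pointer at it before the
         * first allocation. */
        static struct kmem_cache kmem_cache_boot = { /* ... */ };

        void __init kmem_cache_init(void)
        {
                kmem_cache = &kmem_cache_boot;
                /* ... */
        }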
@@ -795,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
         *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }

+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -805,6 +806,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
         dump_stack();
         add_taint(TAINT_BAD_PAGE);
 }
+#endif

 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -1587,15 +1589,17 @@ void __init kmem_cache_init(void)
         int order;
         int node;

+        kmem_cache = &kmem_cache_boot;
+
         if (num_possible_nodes() == 1)
                 use_alien_caches = 0;

         for (i = 0; i < NUM_INIT_LISTS; i++) {
                 kmem_list3_init(&initkmem_list3[i]);
                 if (i < MAX_NUMNODES)
-                        cache_cache.nodelists[i] = NULL;
+                        kmem_cache->nodelists[i] = NULL;
         }
-        set_up_list3s(&cache_cache, CACHE_CACHE);
+        set_up_list3s(kmem_cache, CACHE_CACHE);

         /*
          * Fragmentation resistance on low memory - only use bigger
@@ -1607,9 +1611,9 @@ void __init kmem_cache_init(void)

         /* Bootstrap is tricky, because several objects are allocated
          * from caches that do not exist yet:
-         * 1) initialize the cache_cache cache: it contains the struct
-         *    kmem_cache structures of all caches, except cache_cache itself:
-         *    cache_cache is statically allocated.
+         * 1) initialize the kmem_cache cache: it contains the struct
+         *    kmem_cache structures of all caches, except kmem_cache itself:
+         *    kmem_cache is statically allocated.
          * Initially an __init data area is used for the head array and the
          * kmem_list3 structures, it's replaced with a kmalloc allocated
          * array at the end of the bootstrap.
@@ -1618,43 +1622,43 @@ void __init kmem_cache_init(void)
          * An __init data area is used for the head array.
          * 3) Create the remaining kmalloc caches, with minimally sized
          *    head arrays.
-         * 4) Replace the __init data head arrays for cache_cache and the first
+         * 4) Replace the __init data head arrays for kmem_cache and the first
          *    kmalloc cache with kmalloc allocated arrays.
-         * 5) Replace the __init data for kmem_list3 for cache_cache and
+         * 5) Replace the __init data for kmem_list3 for kmem_cache and
          *    the other cache's with kmalloc allocated memory.
          * 6) Resize the head arrays of the kmalloc caches to their final sizes.
          */

         node = numa_mem_id();

-        /* 1) create the cache_cache */
+        /* 1) create the kmem_cache */
         INIT_LIST_HEAD(&slab_caches);
-        list_add(&cache_cache.list, &slab_caches);
-        cache_cache.colour_off = cache_line_size();
-        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-        cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+        list_add(&kmem_cache->list, &slab_caches);
+        kmem_cache->colour_off = cache_line_size();
+        kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+        kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

         /*
          * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
          */
-        cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+        kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
                                   nr_node_ids * sizeof(struct kmem_list3 *);
-        cache_cache.object_size = cache_cache.size;
-        cache_cache.size = ALIGN(cache_cache.size,
+        kmem_cache->object_size = kmem_cache->size;
+        kmem_cache->size = ALIGN(kmem_cache->object_size,
                                         cache_line_size());
-        cache_cache.reciprocal_buffer_size =
-                reciprocal_value(cache_cache.size);
+        kmem_cache->reciprocal_buffer_size =
+                reciprocal_value(kmem_cache->size);

         for (order = 0; order < MAX_ORDER; order++) {
-                cache_estimate(order, cache_cache.size,
-                        cache_line_size(), 0, &left_over, &cache_cache.num);
-                if (cache_cache.num)
+                cache_estimate(order, kmem_cache->size,
+                        cache_line_size(), 0, &left_over, &kmem_cache->num);
+                if (kmem_cache->num)
                         break;
         }
-        BUG_ON(!cache_cache.num);
-        cache_cache.gfporder = order;
-        cache_cache.colour = left_over / cache_cache.colour_off;
-        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+        BUG_ON(!kmem_cache->num);
+        kmem_cache->gfporder = order;
+        kmem_cache->colour = left_over / kmem_cache->colour_off;
+        kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
                                   sizeof(struct slab), cache_line_size());

         /* 2+3) create the kmalloc caches */
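A note on the size computation in the hunk above: struct kmem_cache ends in a
per-CPU array of array_cache pointers, and the per-node kmem_list3 pointers are
stored directly behind the entries actually needed, so the boot descriptor is
sized to this machine rather than to NR_CPUS/MAX_NUMNODES. Worked through with
illustrative numbers (not taken from the patch):

        /*
         * Illustrative only: 64-bit kernel, nr_cpu_ids = 4, nr_node_ids = 2.
         *
         *   offsetof(struct kmem_cache, array[4])    header + 4 * 8 bytes
         * + 2 * sizeof(struct kmem_list3 *)          + 16 bytes
         *
         * __kmem_cache_create() later carves the nodelists array out of
         * that same tail for every cache:
         *
         *   cachep->nodelists =
         *       (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
         */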
@@ -1667,19 +1671,22 @@ void __init kmem_cache_init(void)
          * bug.
          */

-        sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-                                        sizes[INDEX_AC].cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                        NULL);
+        sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+        sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+        sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+        sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+        sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+        __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+        list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);

         if (INDEX_AC != INDEX_L3) {
-                sizes[INDEX_L3].cs_cachep =
-                        __kmem_cache_create(names[INDEX_L3].name,
-                                sizes[INDEX_L3].cs_size,
-                                ARCH_KMALLOC_MINALIGN,
-                                ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                NULL);
+                sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+                sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+                sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+                sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+                sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+                __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+                list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
         }

         slab_early_init = 0;
@@ -1693,20 +1700,23 @@ void __init kmem_cache_init(void)
                  * allow tighter packing of the smaller caches.
                  */
                 if (!sizes->cs_cachep) {
-                        sizes->cs_cachep = __kmem_cache_create(names->name,
-                                        sizes->cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                        NULL);
+                        sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+                        sizes->cs_cachep->name = names->name;
+                        sizes->cs_cachep->size = sizes->cs_size;
+                        sizes->cs_cachep->object_size = sizes->cs_size;
+                        sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+                        __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+                        list_add(&sizes->cs_cachep->list, &slab_caches);
                 }
 #ifdef CONFIG_ZONE_DMA
-                sizes->cs_dmacachep = __kmem_cache_create(
-                                        names->name_dma,
-                                        sizes->cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-                                                SLAB_PANIC,
-                                        NULL);
+                sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+                sizes->cs_dmacachep->name = names->name_dma;
+                sizes->cs_dmacachep->size = sizes->cs_size;
+                sizes->cs_dmacachep->object_size = sizes->cs_size;
+                sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+                __kmem_cache_create(sizes->cs_dmacachep,
+                               ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+                list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
                 sizes++;
                 names++;
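The same open-coded sequence now appears three times in kmem_cache_init(): zalloc
a descriptor from kmem_cache, seed name/size/object_size/align, run
__kmem_cache_create(), and link the result into slab_caches. A hypothetical helper
would factor it out; nothing like it is in this patch, though a follow-up
common-code series adds a similar create_boot_cache():

        /* Hypothetical helper, not part of this patch: */
        static struct kmem_cache *__init boot_kmalloc_cache(const char *name,
                                        size_t size, unsigned long flags)
        {
                struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

                s->name = name;
                s->size = size;
                s->object_size = size;
                s->align = ARCH_KMALLOC_MINALIGN;
                __kmem_cache_create(s, flags); /* boot caches must not fail */
                list_add(&s->list, &slab_caches);
                return s;
        }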
@@ -1717,15 +1727,15 @@ void __init kmem_cache_init(void)

                 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

-                BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-                memcpy(ptr, cpu_cache_get(&cache_cache),
+                BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+                memcpy(ptr, cpu_cache_get(kmem_cache),
                        sizeof(struct arraycache_init));
                 /*
                  * Do not assume that spinlocks can be initialized via memcpy:
                  */
                 spin_lock_init(&ptr->lock);

-                cache_cache.array[smp_processor_id()] = ptr;
+                kmem_cache->array[smp_processor_id()] = ptr;

                 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

@@ -1746,7 +1756,7 @@ void __init kmem_cache_init(void)
                 int nid;

                 for_each_online_node(nid) {
-                        init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+                        init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);

                         init_list(malloc_sizes[INDEX_AC].cs_cachep,
                                   &initkmem_list3[SIZE_AC + nid], nid);
@@ -2195,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
         }
 }

-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-        int i;
-        struct kmem_list3 *l3;
-
-        for_each_online_cpu(i)
-            kfree(cachep->array[i]);
-
-        /* NUMA: free the list3 structures */
-        for_each_online_node(i) {
-                l3 = cachep->nodelists[i];
-                if (l3) {
-                        kfree(l3->shared);
-                        free_alien_cache(l3->alien);
-                        kfree(l3);
-                }
-        }
-        kmem_cache_free(&cache_cache, cachep);
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2352,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2367,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-        unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
         size_t left_over, slab_size, ralign;
-        struct kmem_cache *cachep = NULL;
         gfp_t gfp;
+        int err;
+        size_t size = cachep->size;

 #if DEBUG
 #if FORCED_DEBUG
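From this hunk on, the creation protocol is inverted: __kmem_cache_create() no
longer allocates or returns the descriptor. The caller allocates it, seeds name,
size, object_size, align (and now ctor, which is gone from the parameter list),
and gets back 0 or a negative errno. A simplified sketch of the intended caller,
the kmem_cache_create() wrapper this series moves to mm/slab_common.c (shape
assumed; locking and error unwind elided):

        struct kmem_cache *s;
        int err;

        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        s->name = name;
        s->size = s->object_size = size;
        s->align = align;
        s->ctor = ctor;                 /* ctor is stored by the caller now */
        err = __kmem_cache_create(s, flags);
        if (err) {
                kmem_cache_free(kmem_cache, s);
                s = NULL;               /* and err is reported upward */
        }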
@@ -2445,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
                 ralign = ARCH_SLAB_MINALIGN;
         }
         /* 3) caller mandated alignment */
-        if (ralign < align) {
-                ralign = align;
+        if (ralign < cachep->align) {
+                ralign = cachep->align;
         }
         /* disable debug if necessary */
         if (ralign > __alignof__(unsigned long long))
@@ -2454,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         /*
          * 4) Store it.
          */
-        align = ralign;
+        cachep->align = ralign;

         if (slab_is_available())
                 gfp = GFP_KERNEL;
         else
                 gfp = GFP_NOWAIT;

-        /* Get cache's description obj. */
-        cachep = kmem_cache_zalloc(&cache_cache, gfp);
-        if (!cachep)
-                return NULL;
-
         cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-        cachep->object_size = size;
-        cachep->align = align;
 #if DEBUG

         /*
@@ -2514,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
          */
         flags |= CFLGS_OFF_SLAB;

-        size = ALIGN(size, align);
+        size = ALIGN(size, cachep->align);

-        left_over = calculate_slab_order(cachep, size, align, flags);
+        left_over = calculate_slab_order(cachep, size, cachep->align, flags);
+
+        if (!cachep->num)
+                return -E2BIG;

-        if (!cachep->num) {
-                printk(KERN_ERR
-                       "kmem_cache_create: couldn't create cache %s.\n", name);
-                kmem_cache_free(&cache_cache, cachep);
-                return NULL;
-        }
         slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-                          + sizeof(struct slab), align);
+                          + sizeof(struct slab), cachep->align);

         /*
          * If the slab has been placed off-slab, and we have enough space then
@@ -2553,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,

         cachep->colour_off = cache_line_size();
         /* Offset must be a multiple of the alignment. */
-        if (cachep->colour_off < align)
-                cachep->colour_off = align;
+        if (cachep->colour_off < cachep->align)
+                cachep->colour_off = cachep->align;
         cachep->colour = left_over / cachep->colour_off;
         cachep->slab_size = slab_size;
         cachep->flags = flags;
@@ -2575,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
                  */
                 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
         }
-        cachep->ctor = ctor;
-        cachep->name = name;

-        if (setup_cpu_cache(cachep, gfp)) {
-                __kmem_cache_destroy(cachep);
-                return NULL;
+        err = setup_cpu_cache(cachep, gfp);
+        if (err) {
+                __kmem_cache_shutdown(cachep);
+                return err;
         }

         if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2593,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
                 slab_set_debugobj_lock_classes(cachep);
         }

-        /* cache setup completed, link it into the list */
-        list_add(&cachep->list, &slab_caches);
-        return cachep;
+        return 0;
 }

 #if DEBUG
@@ -2754,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);

-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-        BUG_ON(!cachep || in_interrupt());
+        int i;
+        struct kmem_list3 *l3;
+        int rc = __cache_shrink(cachep);

-        /* Find the cache in the chain of caches. */
-        get_online_cpus();
-        mutex_lock(&slab_mutex);
-        /*
-         * the chain is never empty, cache_cache is never destroyed
-         */
-        list_del(&cachep->list);
-        if (__cache_shrink(cachep)) {
-                slab_error(cachep, "Can't free all objects");
-                list_add(&cachep->list, &slab_caches);
-                mutex_unlock(&slab_mutex);
-                put_online_cpus();
-                return;
-        }
+        if (rc)
+                return rc;

-        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-                rcu_barrier();
+        for_each_online_cpu(i)
+            kfree(cachep->array[i]);

-        __kmem_cache_destroy(cachep);
-        mutex_unlock(&slab_mutex);
-        put_online_cpus();
+        /* NUMA: free the list3 structures */
+        for_each_online_node(i) {
+                l3 = cachep->nodelists[i];
+                if (l3) {
+                        kfree(l3->shared);
+                        free_alien_cache(l3->alien);
+                        kfree(l3);
+                }
+        }
+        return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);

 /*
  * Get the memory for a slab management obj.
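The destroy path is split the same way as the create path: list manipulation,
slab_mutex, CPU hotplug locking, and the RCU barrier move to common code, while
the allocator-specific teardown above becomes __kmem_cache_shutdown(), which
returns nonzero when __cache_shrink() finds live objects. A simplified sketch of
the common kmem_cache_destroy() expected to drive it (shape assumed from this
series; refcounting omitted):

        void kmem_cache_destroy(struct kmem_cache *s)
        {
                get_online_cpus();
                mutex_lock(&slab_mutex);
                list_del(&s->list);
                if (__kmem_cache_shutdown(s)) {
                        /* live objects remain: relink the cache, complain */
                        list_add(&s->list, &slab_caches);
                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
                               s->name);
                        dump_stack();
                } else {
                        if (unlikely(s->flags & SLAB_DESTROY_BY_RCU))
                                rcu_barrier();
                        kmem_cache_free(kmem_cache, s);
                }
                mutex_unlock(&slab_mutex);
                put_online_cpus();
        }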
@@ -3330,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,

 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-        if (cachep == &cache_cache)
+        if (cachep == kmem_cache)
                 return false;

         return should_failslab(cachep->object_size, flags, cachep->flags);