@@ -2775,7 +2775,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
@@ -3174,11 +3174,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+static struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -3280,7 +3280,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3316,7 +3316,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		trace_kmalloc_node(_RET_IP_, ret,
@@ -3721,7 +3721,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
 		caches++;
 	}
@@ -3739,7 +3739,7 @@ void __init kmem_cache_init(void)
 		BUG_ON(!kmalloc_caches[2]->name);
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 		BUG_ON(!s);
@@ -3751,7 +3751,7 @@ void __init kmem_cache_init(void)
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];
 
 		if (s && s->size) {
@@ -3930,7 +3930,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3953,7 +3953,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
 		trace_kmalloc_node(caller, ret,
@@ -4312,7 +4312,7 @@ static void resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
 	printk(KERN_ERR "-----------------------\n");
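
The switch from SLUB_PAGE_SHIFT (an exclusive bound) to KMALLOC_SHIFT_HIGH (an inclusive maximum size shift) is why the array declarations gain a "+ 1" and the loop conditions change from "<" to "<=". A minimal standalone sketch of that indexing convention is below; the numeric values are placeholders, since the real KMALLOC_SHIFT_LOW/KMALLOC_SHIFT_HIGH are config- and allocator-dependent kernel macros, and the array of strings merely stands in for the kmem_cache pointers.

#include <stdio.h>

/* Placeholder values; the real constants are config-dependent kernel macros. */
#define KMALLOC_SHIFT_LOW	3	/* smallest cache: 1 << 3  = 8 bytes    */
#define KMALLOC_SHIFT_HIGH	13	/* largest cache:  1 << 13 = 8192 bytes */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)

/* KMALLOC_SHIFT_HIGH is itself a valid index, hence the extra slot. */
static const char *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

int main(void)
{
	int i;

	/* Inclusive upper bound: <= KMALLOC_SHIFT_HIGH, not < SLUB_PAGE_SHIFT. */
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		kmalloc_caches[i] = "kmalloc";

	printf("largest kmalloc cache: %lu bytes (index %d)\n",
	       KMALLOC_MAX_CACHE_SIZE, KMALLOC_SHIFT_HIGH);
	return 0;
}

Requests larger than KMALLOC_MAX_CACHE_SIZE bypass this array entirely and fall back to the page allocator, which is the kmalloc_large() path guarded by the size checks in the hunks above.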