@@ -2982,7 +2982,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		s->allocflags |= __GFP_COMP;
 
 	if (s->flags & SLAB_CACHE_DMA)
-		s->allocflags |= SLUB_DMA;
+		s->allocflags |= GFP_DMA;
 
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		s->allocflags |= __GFP_RECLAIMABLE;
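SLUB_DMA was a SLUB-private alias; with the kmalloc machinery going common, the generic GFP_DMA flag is used directly. From memory of the pre-series mm/slub.c the alias was roughly the following (a sketch, not a quote from the tree), so the substitution is behaviour-neutral whenever CONFIG_ZONE_DMA is enabled, GFP_DMA itself being defined as __GFP_DMA:

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA ((__force gfp_t)0)
#endif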
@@ -3210,64 +3210,6 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-/*
- * Conversion table for small slabs sizes / 8 to the index in the
- * kmalloc array. This is necessary for slabs < 192 since we have non power
- * of two cache sizes there. The size of larger slabs can be determined using
- * fls.
- */
-static s8 size_index[24] = {
-	3,	/* 8 */
-	4,	/* 16 */
-	5,	/* 24 */
-	5,	/* 32 */
-	6,	/* 40 */
-	6,	/* 48 */
-	6,	/* 56 */
-	6,	/* 64 */
-	1,	/* 72 */
-	1,	/* 80 */
-	1,	/* 88 */
-	1,	/* 96 */
-	7,	/* 104 */
-	7,	/* 112 */
-	7,	/* 120 */
-	7,	/* 128 */
-	2,	/* 136 */
-	2,	/* 144 */
-	2,	/* 152 */
-	2,	/* 160 */
-	2,	/* 168 */
-	2,	/* 176 */
-	2,	/* 184 */
-	2	/* 192 */
-};
-
-static inline int size_index_elem(size_t bytes)
-{
-	return (bytes - 1) / 8;
-}
-
-static struct kmem_cache *get_slab(size_t size, gfp_t flags)
-{
-	int index;
-
-	if (size <= 192) {
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		index = size_index[size_index_elem(size)];
-	} else
-		index = fls(size - 1);
-
-#ifdef CONFIG_ZONE_DMA
-	if (unlikely((flags & SLUB_DMA)))
-		return kmalloc_dma_caches[index];
-
-#endif
-	return kmalloc_caches[index];
-}
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
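The lookup logic removed here is not gone: it is shared with SLAB and moves into the common slab code (mm/slab_common.c), which slub.c reaches as kmalloc_slab() in the later hunks. A sketch of the common helper, reconstructed by mirroring the code deleted above rather than quoted from the tree, so details may differ:

struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		/* Non-power-of-two caches (96, 192) require the table lookup */
		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely(flags & GFP_DMA))
		return kmalloc_dma_caches[index];
#endif
	return kmalloc_caches[index];
}

The kmalloc_caches[] and kmalloc_dma_caches[] arrays move to the common code as part of the same series, which is what lets slub.c drop its private copies.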
@@ -3276,7 +3218,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
-	s = get_slab(size, flags);
+	s = kmalloc_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
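Taken together with the hunk above, __kmalloc() after the change looks roughly like this; the body is reassembled from the context lines plus the usual slab_alloc()/trace_kmalloc() calls of that kernel generation, so treat it as a sketch rather than a quote:

void *__kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *s;
	void *ret;

	/* Requests beyond the largest kmalloc cache go straight to the page allocator */
	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return kmalloc_large(size, flags);

	/* Common size-to-cache lookup shared with SLAB */
	s = kmalloc_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = slab_alloc(s, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);

	return ret;
}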
@@ -3319,7 +3261,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 		return ret;
 	}
 
-	s = get_slab(size, flags);
+	s = kmalloc_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3632,7 +3574,6 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
-	int i;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -3663,45 +3604,6 @@ void __init kmem_cache_init(void)
 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
-
-	/*
-	 * Patch up the size_index table if we have strange large alignment
-	 * requirements for the kmalloc array. This is only the case for
-	 * MIPS it seems. The standard arches will not generate any code here.
-	 *
-	 * Largest permitted alignment is 256 bytes due to the way we
-	 * handle the index determination for the smaller caches.
-	 *
-	 * Make sure that nothing crazy happens if someone starts tinkering
-	 * around with ARCH_KMALLOC_MINALIGN
-	 */
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
-
-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
-		int elem = size_index_elem(i);
-		if (elem >= ARRAY_SIZE(size_index))
-			break;
-		size_index[elem] = KMALLOC_SHIFT_LOW;
-	}
-
-	if (KMALLOC_MIN_SIZE == 64) {
-		/*
-		 * The 96 byte size cache is not used if the alignment
-		 * is 64 byte.
-		 */
-		for (i = 64 + 8; i <= 96; i += 8)
-			size_index[size_index_elem(i)] = 7;
-	} else if (KMALLOC_MIN_SIZE == 128) {
-		/*
-		 * The 192 byte sized cache is not used if the alignment
-		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
-		 * instead.
-		 */
-		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[size_index_elem(i)] = 8;
-	}
-
 	create_kmalloc_caches(0);
 
 #ifdef CONFIG_SMP
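The alignment fix-up removed from kmem_cache_init() also survives in the common code: an equivalent patch-up of the shared table runs when the common kmalloc array is set up in mm/slab_common.c (exact location hedged from memory). To make the index arithmetic concrete, here is a small stand-alone demo of the KMALLOC_MIN_SIZE == 64 case; it is not kernel code, just the lookup table and slot computation copied from the block deleted earlier:

#include <stddef.h>
#include <stdio.h>

/* Copy of the small-size lookup table from the removed mm/slub.c code. */
static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/* 8 .. 64 bytes   */
	1, 1, 1, 1, 7, 7, 7, 7,		/* 72 .. 128 bytes */
	2, 2, 2, 2, 2, 2, 2, 2		/* 136 .. 192 bytes */
};

static int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	int i;

	/* Default mapping: an 80-byte request hits index 1, the 96-byte cache. */
	printf("80 bytes -> kmalloc index %d\n", size_index[size_index_elem(80)]);

	/* With a 64-byte minimum alignment the 96-byte cache cannot exist, so
	 * the 72..96 byte slots are redirected to index 7 (the 128-byte cache),
	 * exactly what the removed KMALLOC_MIN_SIZE == 64 loop did. */
	for (i = 64 + 8; i <= 96; i += 8)
		size_index[size_index_elem(i)] = 7;

	printf("80 bytes -> kmalloc index %d\n", size_index[size_index_elem(80)]);
	return 0;
}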
@@ -3877,7 +3779,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
-	s = get_slab(size, gfpflags);
+	s = kmalloc_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3907,7 +3809,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 		return ret;
 	}
 
-	s = get_slab(size, gfpflags);
+	s = kmalloc_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;