@@ -260,13 +260,6 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 	return 1;
 }
 
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
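
With the per-cpu offset field going away, every freelist-pointer access in the fast paths below is routed through get_freepointer() above and its setter counterpart rather than through open-coded indexing. For reference, a minimal sketch of that counterpart, assuming the usual definition already present in mm/slub.c (it is not touched by this patch):

	static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
	{
		/* Store the next-free pointer inside the object, s->offset bytes in. */
		*(void **)(object + s->offset) = fp;
	}
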
@@ -1473,10 +1466,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		c->freelist = get_freepointer(s, c->freelist);
 
 		/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
+		set_freepointer(s, object, page->freelist);
 		page->freelist = object;
 		page->inuse--;
 	}
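
The conversions here and in the fast paths below rely on the old word index and the new byte offset naming the same slot: c->offset was set to s->offset / sizeof(void *) (see the init_kmem_cache_cpu() removal further down), so object[c->offset] and *(void **)(object + s->offset) read the same word. A standalone, user-space illustration of that equivalence (a sketch only, not kernel code; the names below are made up):

	#include <assert.h>
	#include <stddef.h>

	/* Word indexing by offset / sizeof(void *) and byte indexing by offset
	 * reach the same free-pointer slot inside an object.
	 */
	static void *fp_by_words(void *object, size_t word_offset)
	{
		return ((void **)object)[word_offset];
	}

	static void *fp_by_bytes(void *object, size_t byte_offset)
	{
		return *(void **)((char *)object + byte_offset);
	}

	int main(void)
	{
		void *slot[4] = { 0 };
		size_t byte_offset = 2 * sizeof(void *);	/* stand-in for s->offset */

		slot[2] = &slot[3];	/* plant a "next free" pointer */
		assert(fp_by_words(slot, byte_offset / sizeof(void *)) ==
		       fp_by_bytes(slot, byte_offset));
		return 0;
	}
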
@@ -1635,7 +1628,7 @@ load_freelist:
 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
 		goto debug;
 
-	c->freelist = object[c->offset];
+	c->freelist = get_freepointer(s, object);
 	c->page->inuse = c->page->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1681,7 +1674,7 @@ debug:
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->page->freelist = get_freepointer(s, object);
 	c->node = -1;
 	goto unlock_out;
 }
@@ -1702,7 +1695,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
-	unsigned long objsize;
 
 	gfpflags &= gfp_allowed_mask;
 
@@ -1715,22 +1707,21 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
 	object = c->freelist;
-	objsize = c->objsize;
 	if (unlikely(!object || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		c->freelist = object[c->offset];
+		c->freelist = get_freepointer(s, object);
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, objsize);
+		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
-	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
 
 	return object;
 }
@@ -1785,7 +1776,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr, unsigned int offset)
+			void *x, unsigned long addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1799,7 +1790,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		goto debug;
 
 checks_ok:
-	prior = object[offset] = page->freelist;
+	prior = page->freelist;
+	set_freepointer(s, object, prior);
 	page->freelist = object;
 	page->inuse--;
 
@@ -1864,16 +1856,16 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
-	kmemcheck_slab_free(s, object, c->objsize);
-	debug_check_no_locks_freed(object, c->objsize);
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, c->objsize);
+		debug_check_no_obj_freed(object, s->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
+		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
 		stat(c, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, addr);
 
 	local_irq_restore(flags);
 }
@@ -2060,19 +2052,6 @@ static unsigned long calculate_alignment(unsigned long flags,
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_cpu(struct kmem_cache *s,
-			struct kmem_cache_cpu *c)
-{
-	c->page = NULL;
-	c->freelist = NULL;
-	c->node = 0;
-	c->offset = s->offset / sizeof(void *);
-	c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
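
Dropping offset and objsize is what makes init_kmem_cache_cpu() redundant: every field that remains in struct kmem_cache_cpu starts out as zero/NULL, which the per-cpu allocator already provides, so the per-cpu init loop can go as well (see the following two hunks). A hedged sketch of how the structure would look after this change; the real definition lives in include/linux/slub_def.h, which is outside this diff:

	struct kmem_cache_cpu {
		void **freelist;	/* Pointer to the first free per-cpu object */
		struct page *page;	/* The slab page we are allocating from */
		int node;		/* Node of that page (-1 during debugging) */
	#ifdef CONFIG_SLUB_STATS
		unsigned stat[NR_SLUB_STAT_ITEMS];	/* Operation counters */
	#endif
	};
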
@@ -2090,8 +2069,6 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
-	int cpu;
-
 	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
@@ -2104,8 +2081,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 	if (!s->cpu_slab)
 		return 0;
 
-	for_each_possible_cpu(cpu)
-		init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
 	return 1;
 }
 
@@ -2391,6 +2366,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
 		return 1;
+
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
@@ -3247,22 +3223,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
-		int cpu;
-
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
-
-		/*
-		 * And then we need to update the object size in the
-		 * per cpu structures
-		 */
-		for_each_online_cpu(cpu)
-			per_cpu_ptr(s->cpu_slab, cpu)->objsize = s->objsize;
-
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
@@ -3316,14 +3282,6 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	unsigned long flags;
 
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		down_read(&slub_lock);
-		list_for_each_entry(s, &slab_caches, list)
-			init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
-		up_read(&slub_lock);
-		break;
-
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD: