@@ -1551,7 +1551,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	}
 }
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 #ifdef CONFIG_PREEMPT
 /*
  * Calculate the next globally unique transaction for disambiguiation
@@ -1611,17 +1610,12 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-#endif
-
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
 }
 /*
  * Remove the cpu slab
@@ -1654,9 +1648,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 			page->inuse--;
 	}
 	c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
-#endif
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1791,7 +1783,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	struct page *page;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1802,7 +1793,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * pointer.
 	 */
 	c = this_cpu_ptr(s->cpu_slab);
-#endif
 #endif
 
 	/* We handle __GFP_ZERO in the caller */
@@ -1831,10 +1821,8 @@ load_freelist:
 
 unlock_out:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-#endif
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
@@ -1873,9 +1861,7 @@ load_from_page:
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, page, object, addr))
@@ -1902,20 +1888,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-#else
 redo:
-#endif
 
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1925,7 +1903,6 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
@@ -1934,7 +1911,6 @@ redo:
 	 */
 	tid = c->tid;
 	barrier();
-#endif
 
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
@@ -1942,7 +1918,6 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-#ifdef CONFIG_CMPXCHG_LOCAL
 		/*
 		 * The cmpxchg will only match if there was no additonal
 		 * operation and if we are on the right processor.
@@ -1963,16 +1938,9 @@ redo:
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = get_freepointer(s, object);
-#endif
 		stat(s, ALLOC_FASTPATH);
 	}
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
-
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
@@ -2049,11 +2017,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
-#endif
 	slab_lock(page);
 	stat(s, FREE_SLOWPATH);
 
@@ -2084,9 +2050,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 out_unlock:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -2098,9 +2062,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2121,20 +2083,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	slab_free_hook(s, x);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-
-#else
 redo:
-#endif
 
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2144,15 +2097,12 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	tid = c->tid;
 	barrier();
-#endif
 
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
@@ -2161,16 +2111,10 @@ redo:
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = object;
-#endif
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
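
The fastpath that the deleted #ifdefs leave unconditionally enabled works as
follows: every per-cpu freelist is paired with a transaction id (tid), the hot
path snapshots the tid, reads the freelist, and commits with a single
cmpxchg_double over the (freelist, tid) pair, retrying from redo: whenever any
other operation slipped in between. The program below is a minimal user-space
sketch of that pattern, not kernel code, assuming a little-endian LP64 GCC
build: its cmpxchg_double() built on a 16-byte __atomic_compare_exchange_n
stands in for the kernel's this_cpu_cmpxchg_double(), a zero-offset next
pointer for get_freepointer()/set_freepointer(), and tid + 1 for next_tid()
(which really advances by TID_STEP). All names in it are illustrative.

/* slub_fastpath_sketch.c — illustrative model of the SLUB lockless fastpath.
 * Build: gcc -std=c11 -O2 -mcx16 slub_fastpath_sketch.c (may need -latomic).
 */
#include <stdint.h>
#include <stdio.h>

struct cpu_slab {
	_Alignas(16) void *freelist;	/* first free object, or NULL */
	uintptr_t tid;			/* bumped by every freelist operation */
};

/* Stand-in for this_cpu_cmpxchg_double(): atomically replace the
 * (freelist, tid) pair iff it still matches the snapshot we took.
 * Assumes the struct is exactly 16 bytes, little-endian. */
static int cmpxchg_double(struct cpu_slab *c, void *old_fl, uintptr_t old_tid,
			  void *new_fl, uintptr_t new_tid)
{
	__int128 expected = ((__int128)old_tid << 64) | (uintptr_t)old_fl;
	__int128 desired = ((__int128)new_tid << 64) | (uintptr_t)new_fl;

	return __atomic_compare_exchange_n((__int128 *)(void *)c, &expected,
					   desired, 0, __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}

static void *fast_alloc(struct cpu_slab *c)
{
	void *object, *next;
	uintptr_t tid;
redo:
	tid = c->tid;				/* snapshot the transaction id */
	__asm__ __volatile__("" ::: "memory");	/* the patch's barrier() */
	object = c->freelist;
	if (!object)
		return NULL;		/* the kernel falls back to __slab_alloc() */
	next = *(void **)object;	/* get_freepointer(), zero offset here */
	if (!cmpxchg_double(c, object, tid, next, tid + 1))
		goto redo;		/* another operation raced us: retry */
	return object;
}

static void fast_free(struct cpu_slab *c, void *object)
{
	void *freelist;
	uintptr_t tid;
redo:
	tid = c->tid;
	__asm__ __volatile__("" ::: "memory");
	freelist = c->freelist;
	*(void **)object = freelist;		/* set_freepointer() */
	if (!cmpxchg_double(c, freelist, tid, object, tid + 1))
		goto redo;
}

int main(void)
{
	static struct cpu_slab c;		/* freelist = NULL, tid = 0 */
	static char objects[4][64];
	int i;

	for (i = 0; i < 4; i++)		/* thread four objects onto the list */
		fast_free(&c, objects[i]);

	void *o = fast_alloc(&c);	/* pops the most recently freed object */
	printf("allocated %p, tid is now %llu\n", o, (unsigned long long)c.tid);
	return 0;
}

Because the tid is bumped on every operation (including the slow paths above,
see the next_tid() calls in deactivate_slab() and unlock_out:), a successful
compare-exchange proves both that the freelist is unchanged and that no other
operation ran in between, which is what lets the fastpath run without
disabling interrupts.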