@@ -1537,6 +1537,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
+	c->node = -1;
 	slab_unlock(c->page);
 	return object;
 }
@@ -1560,8 +1561,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->page || !c->freelist ||
-					!node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1670,7 +1670,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && !SlabDebug(page))) {
+	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
@@ -3250,12 +3250,16 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	for_each_possible_cpu(cpu) {
 		struct page *page;
+		int node;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (!c)
 			continue;
 
 		page = c->page;
+		node = c->node;
+		if (node < 0)
+			continue;
 		if (page) {
 			if (flags & SO_CPU) {
 				int x = 0;
@@ -3265,9 +3269,9 @@ static unsigned long slab_objects(struct kmem_cache *s,
 			else
 				x = 1;
 			total += x;
-			nodes[c->node] += x;
+			nodes[node] += x;
 			}
-			per_cpu[c->node]++;
+			per_cpu[node]++;
 		}
 	}
 
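Taken together, the hunks above appear to flag a debug-active cpu slab by setting c->node to -1: the free fastpath now tests c->node >= 0 instead of !SlabDebug(page), the slab_objects() statistics walk skips cpu slabs with a negative node, and the alloc fastpath already falls through to __slab_alloc() because c->freelist stays NULL for such a slab. The user-space sketch below only illustrates that flagging scheme; the struct and helpers are simplified stand-ins, not SLUB's, and it assumes node_match() treats a request for node -1 as "any node".

/*
 * Sketch only: models the "node == -1 disables the fastpaths" idea
 * from the patch above with simplified, made-up types and helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_slab {
	void **freelist;	/* next free object; NULL forces the slow path */
	int node;		/* node of the cpu slab, or -1: no fastpath */
};

/* assumed node_match() semantics: node -1 means "any node is fine" */
static bool node_match(const struct cpu_slab *c, int node)
{
	return node == -1 || node == c->node;
}

/* mirrors: if (unlikely(!c->freelist || !node_match(c, node))) */
static bool alloc_fastpath(const struct cpu_slab *c, int node)
{
	return c->freelist && node_match(c, node);
}

/* mirrors: if (likely(page == c->page && c->node >= 0)) */
static bool free_fastpath(const struct cpu_slab *c)
{
	return c->node >= 0;
}

int main(void)
{
	void *obj = NULL;
	struct cpu_slab normal = { .freelist = &obj, .node = 0 };
	struct cpu_slab debug  = { .freelist = NULL, .node = -1 };

	printf("normal cpu slab: alloc fastpath=%d free fastpath=%d\n",
	       alloc_fastpath(&normal, -1), free_fastpath(&normal));
	printf("debug  cpu slab: alloc fastpath=%d free fastpath=%d\n",
	       alloc_fastpath(&debug, -1), free_fastpath(&debug));
	return 0;
}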