@@ -1106,15 +1106,18 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	int nodeid = slabp->nodeid;
 	struct kmem_list3 *l3;
 	struct array_cache *alien = NULL;
+	int node;
+
+	node = numa_node_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == numa_node_id()))
+	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
@@ -1352,6 +1355,7 @@ void __init kmem_cache_init(void)
 	struct cache_names *names;
 	int i;
 	int order;
+	int node;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1386,12 +1390,14 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
+	node = numa_node_id();
+
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
@@ -1496,19 +1502,18 @@ void __init kmem_cache_init(void)
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
-		int node;
+		int nid;
+
 		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-			  numa_node_id());
+		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
 
-		for_each_online_node(node) {
+		for_each_online_node(nid) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
-				  &initkmem_list3[SIZE_AC + node], node);
+				  &initkmem_list3[SIZE_AC + nid], nid);
 
 			if (INDEX_AC != INDEX_L3) {
 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-					  &initkmem_list3[SIZE_L3 + node],
-					  node);
+					  &initkmem_list3[SIZE_L3 + nid], nid);
 			}
 		}
 	}
@@ -2918,6 +2923,9 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	int node;
+
+	node = numa_node_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -2931,7 +2939,7 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
@@ -2961,7 +2969,7 @@ retry:
 			STATS_SET_HIGH(cachep);
 
 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
-							      numa_node_id());
+							      node);
 		}
 		check_slabp(cachep, slabp);
 
@@ -2980,7 +2988,7 @@ alloc_done:
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
+		x = cache_grow(cachep, flags, node);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
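
The change is the same in every hunk: numa_node_id() expands to a per-CPU
lookup, so a function that calls it several times while it cannot migrate
between CPUs (interrupts are off in cache_free_alien() and
cache_alloc_refill(), and kmem_cache_init() runs single-threaded at boot)
repeats work whose result cannot change. Hoisting the lookup into a local
lets the compiler keep the value in a register. A minimal userspace sketch
of the same hoisting, with a hypothetical lookup_node() standing in for
numa_node_id():

	#include <stdio.h>

	static int lookup_calls;

	/* Hypothetical stand-in for numa_node_id(): cheap but not free,
	 * and invariant for the duration of the caller. */
	static int lookup_node(void)
	{
		lookup_calls++;		/* count the redundant work */
		return 0;		/* pretend we always run on node 0 */
	}

	/* Before: the lookup is repeated at every use. */
	static void refill_before(int iterations)
	{
		for (int i = 0; i < iterations; i++)
			if (lookup_node() == 0)		/* one call per check... */
				(void)lookup_node();	/* ...one more per use */
	}

	/* After: hoist the invariant lookup into a local, as the patch does. */
	static void refill_after(int iterations)
	{
		int node = lookup_node();	/* one call, reused below */

		for (int i = 0; i < iterations; i++)
			if (node == 0)
				(void)node;
	}

	int main(void)
	{
		refill_before(1000);
		printf("before: %d lookups\n", lookup_calls);	/* 2000 */

		lookup_calls = 0;
		refill_after(1000);
		printf("after:  %d lookups\n", lookup_calls);	/* 1 */
		return 0;
	}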