@@ -2050,10 +2050,10 @@ static void flush_all(struct kmem_cache *s)
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
+	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
 		return 0;
 #endif
 	return 1;
@@ -2226,7 +2226,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto new_slab;
 redo:
 
-	if (unlikely(!node_match(c, node))) {
+	if (unlikely(!node_match(page, node))) {
 		stat(s, ALLOC_NODE_MISMATCH);
 		deactivate_slab(s, page, c->freelist);
 		c->page = NULL;
@@ -2313,6 +2313,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
+	struct page *page;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2338,7 +2339,8 @@ redo:
 	barrier();
 
 	object = c->freelist;
-	if (unlikely(!object || !node_match(c, node)))
+	page = c->page;
+	if (unlikely(!object || !node_match(page, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);