@@ -899,6 +899,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -2808,19 +2809,11 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 
 #ifdef CONFIG_NUMA
-	if (unlikely(current->mempolicy && !in_interrupt())) {
-		int nid = slab_node(current->mempolicy);
-
-		if (nid != numa_node_id())
-			return __cache_alloc_node(cachep, flags, nid);
-	}
-	if (unlikely(cpuset_do_slab_mem_spread() &&
-			(cachep->flags & SLAB_MEM_SPREAD) &&
-			!in_interrupt())) {
-		int nid = cpuset_mem_spread_node();
-
-		if (nid != numa_node_id())
-			return __cache_alloc_node(cachep, flags, nid);
+	if (unlikely(current->flags & (PF_SPREAD_PAGE | PF_SPREAD_SLAB |
+			PF_MEMPOLICY))) {
+		objp = alternate_node_alloc(cachep, flags);
+		if (objp != NULL)
+			return objp;
 	}
 #endif
 
@@ -2855,6 +2848,28 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 }
 
 #ifdef CONFIG_NUMA
+/*
+ * Try allocating on another node if PF_SPREAD_PAGE|PF_SPREAD_SLAB|PF_MEMPOLICY.
+ *
+ * If we are in_interrupt, then process context, including cpusets and
+ * mempolicy, may not apply and should not be used for allocation policy.
+ */
+static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	int nid_alloc, nid_here;
+
+	if (in_interrupt())
+		return NULL;
+	nid_alloc = nid_here = numa_node_id();
+	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+		nid_alloc = cpuset_mem_spread_node();
+	else if (current->mempolicy)
+		nid_alloc = slab_node(current->mempolicy);
+	if (nid_alloc != nid_here)
+		return __cache_alloc_node(cachep, flags, nid_alloc);
+	return NULL;
+}
+
 /*
  * A interface to enable slab creation on nodeid
  */