@@ -1524,12 +1524,13 @@ static inline void *acquire_slab(struct kmem_cache *s,
 }
 
 static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static void *get_partial_node(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
+static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+				struct kmem_cache_cpu *c, gfp_t flags)
 {
 	struct page *page, *page2;
 	void *object = NULL;
@@ -1545,9 +1546,13 @@ static void *get_partial_node(struct kmem_cache *s,
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
-		void *t = acquire_slab(s, n, page, object == NULL);
+		void *t;
 		int available;
 
+		if (!pfmemalloc_match(page, flags))
+			continue;
+
+		t = acquire_slab(s, n, page, object == NULL);
 		if (!t)
 			break;
 
@@ -1614,7 +1619,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			object = get_partial_node(s, n, c);
+			object = get_partial_node(s, n, c, flags);
 			if (object) {
 				/*
 				 * Return the object even if
@@ -1643,7 +1648,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 	void *object;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	object = get_partial_node(s, get_node(s, searchnode), c);
+	object = get_partial_node(s, get_node(s, searchnode), c, flags);
 	if (object || node != NUMA_NO_NODE)
 		return object;
 
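
Note: pfmemalloc_match() is only forward-declared in the hunk above; its body is not part of this excerpt. As a rough sketch of what the check needs to do, assuming helpers along the lines of PageSlabPfmemalloc() and gfp_pfmemalloc_allowed() are available (both are assumptions here, not shown in this diff), it could look like:

/*
 * Sketch only, not part of the diff above: a slab page taken from the
 * pfmemalloc reserves should only satisfy allocations that are themselves
 * entitled to dip into those reserves.  PageSlabPfmemalloc() and
 * gfp_pfmemalloc_allowed() are assumed to be provided elsewhere.
 */
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
	if (unlikely(PageSlabPfmemalloc(page)))
		return gfp_pfmemalloc_allowed(gfpflags);

	return true;
}

With this in place, get_partial_node() simply skips any partial slab whose pfmemalloc status does not match the caller's gfp flags, which is why the new gfp_t parameter has to be threaded through get_partial() and get_any_partial() in the later hunks.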