@@ -1493,7 +1493,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
-		int mode)
+		int mode, int *objects)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1507,6 +1507,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
+	*objects = new.objects - new.inuse;
 	if (mode) {
 		new.inuse = page->objects;
 		new.freelist = NULL;
@@ -1528,7 +1529,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	return freelist;
 }

-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);

 /*
@@ -1539,6 +1540,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
+	int available = 0;
+	int objects;

 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1552,22 +1555,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
-		int available;

 		if (!pfmemalloc_match(page, flags))
 			continue;

-		t = acquire_slab(s, n, page, object == NULL);
+		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
 			break;

+		available += objects;
 		if (!object) {
 			c->page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
-			available = page->objects - page->inuse;
 		} else {
-			available = put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
@@ -1946,7 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1984,7 +1986,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;

 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	return pobjects;
 }

 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
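
For reference, a minimal user-space sketch of the counting-order fix this patch makes. When acquire_slab() runs with mode set, it freezes the slab by setting new.inuse = page->objects, so the old code's post-acquire "available = page->objects - page->inuse" came out as 0 for the acquired slab; the new code counts free objects before inuse is overwritten and reports the count through the *objects out-parameter. The toy_slab struct and toy_acquire() below are illustrative assumptions, not kernel code (the real code updates page->counters with a cmpxchg under the node list lock):

	#include <stdio.h>

	struct toy_slab {
		int objects;	/* total objects in the slab */
		int inuse;	/* objects currently allocated */
	};

	/* New scheme: count free objects *before* freezing the slab. */
	static void toy_acquire(struct toy_slab *s, int mode, int *objects)
	{
		*objects = s->objects - s->inuse;	/* accurate count */
		if (mode)
			s->inuse = s->objects;		/* freeze: take all objects */
	}

	int main(void)
	{
		struct toy_slab s = { .objects = 16, .inuse = 5 };
		int objects;

		toy_acquire(&s, 1, &objects);
		/* New code reports 11 acquired objects. */
		printf("acquired: %d\n", objects);

		/* Old code computed the count after acquisition, when inuse
		 * had already been set to objects, so it always saw 0. */
		printf("old post-acquire count: %d\n", s.objects - s.inuse);
		return 0;
	}

With the count accumulated via "available += objects" in get_partial_node(), put_cpu_partial()'s return value is no longer consumed anywhere, which is why the patch also changes it to return void and drops the "return pobjects;" line.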