@@ -1198,19 +1198,15 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+                                struct page *page, int tail)
 {
         spin_lock(&n->list_lock);
         n->nr_partial++;
-        list_add_tail(&page->lru, &n->partial);
-        spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
-        spin_lock(&n->list_lock);
-        n->nr_partial++;
-        list_add(&page->lru, &n->partial);
+        if (tail)
+                list_add_tail(&page->lru, &n->partial);
+        else
+                list_add(&page->lru, &n->partial);
         spin_unlock(&n->list_lock);
 }
 
@@ -1339,7 +1335,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1347,7 +1343,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
         if (page->inuse) {
 
                 if (page->freelist)
-                        add_partial(n, page);
+                        add_partial(n, page, tail);
                 else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
                         add_full(n, page);
                 slab_unlock(page);
@@ -1362,7 +1358,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
                          * partial list stays small. kmem_cache_shrink can
                          * reclaim empty slabs from the partial list.
                          */
-                        add_partial_tail(n, page);
+                        add_partial(n, page, 1);
                         slab_unlock(page);
                 } else {
                         slab_unlock(page);
@@ -1377,6 +1373,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
         struct page *page = c->page;
+        int tail = 1;
         /*
          * Merge cpu freelist into freelist. Typically we get here
          * because both freelists are empty. So this is unlikely
@@ -1385,6 +1382,8 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
         while (unlikely(c->freelist)) {
                 void **object;
 
+                tail = 0;       /* Hot objects. Put the slab first */
+
                 /* Retrieve object from cpu_freelist */
                 object = c->freelist;
                 c->freelist = c->freelist[c->offset];
@@ -1395,7 +1394,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
                 page->inuse--;
         }
         c->page = NULL;
-        unfreeze_slab(s, page);
+        unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -1617,7 +1616,7 @@ checks_ok:
          * then add it.
          */
         if (unlikely(!prior))
-                add_partial_tail(get_node(s, page_to_nid(page)), page);
+                add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
         slab_unlock(page);
@@ -2025,7 +2024,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
         init_kmem_cache_node(n);
         atomic_long_inc(&n->nr_slabs);
-        add_partial(n, page);
+        add_partial(n, page, 0);
         return n;
 }
 
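For illustration only, not part of the patch above: a minimal userspace sketch of the head-versus-tail insertion policy that the consolidated add_partial() now expresses. The struct slab, the "partial" sentinel and the names below are hypothetical stand-ins for the kernel's struct page, kmem_cache_node->partial and the list_add()/list_add_tail() helpers, with the list_lock locking dropped so the sketch compiles and runs on its own.

/*
 * Hypothetical sketch, not kernel code: a circular doubly-linked list with a
 * sentinel head, mimicking list_add() (head) vs list_add_tail() (tail).
 */
#include <stdio.h>

struct slab {
        const char *name;
        struct slab *prev, *next;
};

static struct slab partial = { "head", &partial, &partial };    /* sentinel */
static int nr_partial;

static void add_partial(struct slab *s, int tail)
{
        /* tail: insert before the sentinel; head: insert right after it */
        struct slab *pos = tail ? partial.prev : &partial;

        s->prev = pos;
        s->next = pos->next;
        pos->next->prev = s;
        pos->next = s;
        nr_partial++;
}

int main(void)
{
        struct slab cold = { "cold" }, hot = { "hot" }, empty = { "empty" };

        add_partial(&cold, 1);  /* cold slab: queue at the tail */
        add_partial(&hot, 0);   /* hot objects: put the slab first */
        add_partial(&empty, 1); /* empty slab: fill (or reclaim) last */

        for (struct slab *s = partial.next; s != &partial; s = s->next)
                printf("%s\n", s->name);        /* prints: hot, cold, empty */
        printf("%d partial slabs\n", nr_partial);       /* prints: 3 */
        return 0;
}

The point of the tail flag is visible in the output: a slab added with tail == 0 (its objects were still hot in the cpu freelist) comes off the partial list first, while empty or cold slabs queue at the tail, where they are filled last and kmem_cache_shrink can reclaim them.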