@@ -2970,13 +2970,13 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
+ * This is called from kmem_cache_close(). We must be the last thread
+ * using the cache and therefore we do not need to lock anymore.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-	unsigned long flags;
 	struct page *page, *h;
 
-	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -2986,7 +2986,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			"Objects remaining on kmem_cache_close()");
 		}
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 /*
@@ -3020,6 +3019,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
+		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -3028,8 +3028,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	}
-	up_write(&slub_lock);
+	} else
+		up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -3347,23 +3347,23 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		 * list_lock. page->inuse here is the upper limit.
 		 */
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
-			if (!page->inuse) {
-				remove_partial(n, page);
-				discard_slab(s, page);
-			} else {
-				list_move(&page->lru,
-				slabs_by_inuse + page->inuse);
-			}
+			list_move(&page->lru, slabs_by_inuse + page->inuse);
+			if (!page->inuse)
+				n->nr_partial--;
 		}
 
 		/*
 		 * Rebuild the partial list with the slabs filled up most
 		 * first and the least used slabs at the end.
 		 */
-		for (i = objects - 1; i >= 0; i--)
+		for (i = objects - 1; i > 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
 		spin_unlock_irqrestore(&n->list_lock, flags);
+
+		/* Release empty slabs */
+		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+			discard_slab(s, page);
 	}
 
 	kfree(slabs_by_inuse);
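
The last hunk changes kmem_cache_shrink() so that discard_slab() is never called
under n->list_lock: empty slabs are only parked on slabs_by_inuse[0] (and dropped
from nr_partial) while the lock is held, and are released once the lock has been
dropped. Below is a rough, self-contained userspace sketch of that "collect under
the lock, free after unlock" pattern; struct node_cache, shrink_node() and the
pthread mutex are stand-ins invented for the illustration and are not SLUB code.

/*
 * Illustration only -- not SLUB code. Empty entries are unlinked while the
 * lock is held, but the actual free happens after the lock is released.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slab {				/* stand-in for a page on the partial list */
	int inuse;
	struct slab *next;
};

struct node_cache {			/* stand-in for struct kmem_cache_node */
	pthread_mutex_t list_lock;
	struct slab *partial;
	int nr_partial;
};

static void shrink_node(struct node_cache *n)
{
	struct slab *empty = NULL;	/* plays the role of slabs_by_inuse[0] */
	struct slab **pp;

	pthread_mutex_lock(&n->list_lock);
	pp = &n->partial;
	while (*pp) {
		struct slab *s = *pp;

		if (!s->inuse) {
			*pp = s->next;	/* unlink under the lock ... */
			s->next = empty;
			empty = s;
			n->nr_partial--;
		} else {
			pp = &s->next;
		}
	}
	pthread_mutex_unlock(&n->list_lock);

	/* ... but release empty slabs only after the lock is gone */
	while (empty) {
		struct slab *s = empty;

		empty = s->next;
		free(s);
	}
}

int main(void)
{
	struct node_cache n = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	int i;

	for (i = 0; i < 4; i++) {
		struct slab *s = malloc(sizeof(*s));

		s->inuse = i & 1;	/* every other slab is empty */
		s->next = n.partial;
		n.partial = s;
		n.nr_partial++;
	}
	shrink_node(&n);
	printf("partial slabs left: %d\n", n.nr_partial);	/* prints 2 */
	return 0;
}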