@@ -2688,21 +2688,6 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3181,6 +3166,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)