@@ -107,11 +107,17 @@
  *			the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-						(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1624,7 +1625,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -3398,16 +3399,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
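---

For reference, a minimal userspace sketch (not kernel code) of the pattern this patch introduces: one inline test of the cache's flags word replaces both the compile-time SLABDEBUG constant and the per-page PageSlubDebug() bit. The flag values and the pared-down struct kmem_cache below are stand-ins for illustration; in the kernel they come from include/linux/slab.h, and unlikely() is the kernel's branch-prediction hint.

/*
 * Standalone illustration only. Flag values are copied-by-eye
 * approximations of the era's slab.h and the struct is reduced to
 * the single field the helper reads.
 */
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)	/* stand-in for the kernel macro */

#define SLAB_DEBUG_FREE		0x00000100UL	/* illustrative values */
#define SLAB_RED_ZONE		0x00000400UL
#define SLAB_POISON		0x00000800UL
#define SLAB_STORE_USER		0x00010000UL
#define SLAB_TRACE		0x00200000UL

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

struct kmem_cache {
	unsigned long flags;
	const char *name;
};

/*
 * One test against the cache's flags replaces the old per-page
 * SLABDEBUG && PageSlubDebug() check: every caller already has s
 * in hand, so nothing needs to be mirrored into each struct page.
 */
static inline int kmem_cache_debug(struct kmem_cache *s)
{
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
}

int main(void)
{
	struct kmem_cache plain = { .flags = 0, .name = "plain" };
	struct kmem_cache dbg = { .flags = SLAB_RED_ZONE | SLAB_POISON,
				  .name = "dbg" };

	printf("%s: debug path taken: %d\n", plain.name, kmem_cache_debug(&plain));
	printf("%s: debug path taken: %d\n", dbg.name, kmem_cache_debug(&dbg));
	return 0;
}

Note that with CONFIG_SLUB_DEBUG disabled the patched helper simply returns 0, so the compiler can discard the debug branches in the alloc/free paths just as the old SLABDEBUG 0 constant allowed.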