@@ -837,6 +837,35 @@ static void remove_full(struct kmem_cache *s, struct page *page)
 	spin_unlock(&n->list_lock);
 }
 
+/* Tracking of the number of slabs for debugging purposes */
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	/*
+	 * May be called early in order to allocate a slab for the
+	 * kmem_cache_node structure. Solve the chicken-egg
+	 * dilemma by deferring the increment of the count during
+	 * bootstrap (see early_kmem_cache_node_alloc).
+	 */
+	if (!NUMA_BUILD || n)
+		atomic_long_inc(&n->nr_slabs);
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	atomic_long_dec(&n->nr_slabs);
+}
+
+/* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
 								void *object)
 {
@@ -1028,6 +1057,11 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	return flags;
 }
 #define slub_debug 0
+
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+							{ return 0; }
+static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
 #endif
 /*
  * Slab allocation and freeing
@@ -1066,7 +1100,6 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	struct kmem_cache_node *n;
 	void *start;
 	void *last;
 	void *p;
@@ -1078,9 +1111,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
-	n = get_node(s, page_to_nid(page));
-	if (n)
-		atomic_long_inc(&n->nr_slabs);
+	inc_slabs_node(s, page_to_nid(page));
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1153,9 +1184,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	atomic_long_dec(&n->nr_slabs);
+	dec_slabs_node(s, page_to_nid(page));
 	free_slab(s, page);
 }
 
@@ -1894,10 +1923,10 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_set(&n->nr_slabs, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2066,7 +2095,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	atomic_long_inc(&n->nr_slabs);
+	inc_slabs_node(kmalloc_caches, node);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2379,7 +2408,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
-		if (atomic_long_read(&n->nr_slabs))
+		if (slabs_node(s, node))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
@@ -2801,7 +2830,7 @@ static void slab_mem_offline_callback(void *arg)
 	 * and offline_pages() function shoudn't call this
 	 * callback. So, we must fail.
 	 */
-	BUG_ON(atomic_long_read(&n->nr_slabs));
+	BUG_ON(slabs_node(s, offline_node));
 
 	s->node[offline_node] = NULL;
 	kmem_cache_free(kmalloc_caches, n);
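
The hunks above all follow one pattern: the per-node nr_slabs counter is only touched through the slabs_node()/inc_slabs_node()/dec_slabs_node() helpers, and a !CONFIG_SLUB_DEBUG build gets empty stubs so the counter and its atomic updates compile away while callers stay unchanged. The following standalone C sketch illustrates that compile-out pattern outside the kernel; CACHE_DEBUG, struct node_info and main() are illustrative stand-ins (using C11 <stdatomic.h>), not SLUB code.

/*
 * Sketch of a debug-only counter hidden behind inline helpers.
 * Flip CACHE_DEBUG to 0 and the counter vanishes from the struct
 * and every call site becomes a no-op, with no #ifdefs in callers.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CACHE_DEBUG 1			/* stand-in for CONFIG_SLUB_DEBUG */

struct node_info {
#if CACHE_DEBUG
	atomic_long nr_slabs;		/* exists only in debug builds */
#endif
	long nr_partial;
};

#if CACHE_DEBUG
static inline long slabs_node(struct node_info *n)
{
	return atomic_load(&n->nr_slabs);
}

static inline void inc_slabs_node(struct node_info *n)
{
	atomic_fetch_add(&n->nr_slabs, 1);
}

static inline void dec_slabs_node(struct node_info *n)
{
	atomic_fetch_sub(&n->nr_slabs, 1);
}
#else
/* Debugging off: same interface, zero cost. */
static inline long slabs_node(struct node_info *n) { return 0; }
static inline void inc_slabs_node(struct node_info *n) {}
static inline void dec_slabs_node(struct node_info *n) {}
#endif

int main(void)
{
	struct node_info n = { 0 };

	inc_slabs_node(&n);		/* "allocate" two slabs */
	inc_slabs_node(&n);
	dec_slabs_node(&n);		/* "free" one */

	printf("slabs on node: %ld\n", slabs_node(&n));
	return 0;
}

With debugging enabled this prints 1; with CACHE_DEBUG set to 0 it prints 0 and the counter costs nothing, which mirrors why the patch moves atomic_long_set(&n->nr_slabs, 0) under #ifdef CONFIG_SLUB_DEBUG.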