@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
  */
 #define MAX_PARTIAL 10
 
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
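The comment kept in context above is the contract the new wrappers preserve: the hooks are static inline, and when kmemleak is compiled out its kmemleak_alloc()/kmemleak_free() calls are empty stubs, so kmalloc_large_node_hook() and kfree_hook() still produce no code in a typical production configuration while giving the call sites further down a single, configuration-independent name to call. Below is a minimal userspace sketch of that "paired hook definitions, one call site" pattern; the names MYALLOC_DEBUG, audit_alloc_hook and audit_free_hook are invented for illustration and are not kernel API.

/* Sketch only: build with -DMYALLOC_DEBUG to get the chatty variant. */
#include <stdio.h>
#include <stdlib.h>

#ifdef MYALLOC_DEBUG
/* Debug build: the hooks report every allocation and free. */
static inline void audit_alloc_hook(void *ptr, size_t size)
{
	fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}

static inline void audit_free_hook(void *ptr)
{
	fprintf(stderr, "free  %p\n", ptr);
}
#else
/* Production build: same names, nothing left after inlining. */
static inline void audit_alloc_hook(void *ptr, size_t size) { (void)ptr; (void)size; }
static inline void audit_free_hook(void *ptr) { (void)ptr; }
#endif

int main(void)
{
	void *p = malloc(128);

	/* The call site is identical whichever variant was compiled in. */
	audit_alloc_hook(p, 128);
	audit_free_hook(p);
	free(p);
	return 0;
}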
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
 	/*
 	 * Enable debugging if selected on the kernel commandline.
 	 */
-	if (slub_debug && (!slub_debug_slabs ||
-		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+	if (slub_debug && (!slub_debug_slabs || (name &&
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
 		flags |= slub_debug;
 
 	return flags;
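The functional change in this hunk is the added "name &&" guard: if kmem_cache_flags() is reached with a NULL cache name, the slub_debug_slabs comparison is now simply skipped instead of handing strncmp() a NULL pointer. A standalone sketch of the guarded predicate follows; slub_debug_wanted() and its parameter names are made up here for illustration and are not the kernel function.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool slub_debug_wanted(unsigned long debug_flags,
			      const char *debug_slabs, const char *name)
{
	/*
	 * Debugging applies when it is enabled at all and either no
	 * per-cache filter was given, or the cache has a name matching
	 * the filter prefix.  The "name &&" keeps strncmp() away from
	 * a NULL pointer.
	 */
	return debug_flags && (!debug_slabs ||
		(name && !strncmp(debug_slabs, name, strlen(debug_slabs))));
}

int main(void)
{
	printf("%d\n", slub_debug_wanted(1, "kmalloc", "kmalloc-64")); /* 1 */
	printf("%d\n", slub_debug_wanted(1, "kmalloc", NULL));         /* 0, no crash */
	printf("%d\n", slub_debug_wanted(1, NULL, NULL));              /* 1 */
	return 0;
}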
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
							{ return 0; }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-							void *object) {}
+							void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
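This hunk carries the functional point of the kmemleak change: with CONFIG_SLUB_DEBUG disabled these hooks used to be empty stubs, so kmemleak never heard about SLUB allocations and frees in that configuration, while the CONFIG_SLUB_DEBUG versions of slab_post_alloc_hook() and slab_free_hook() already forward to kmemleak. For readability, here are the !CONFIG_SLUB_DEBUG variants reassembled from the added lines above; the comments are added here, and the surrounding #ifdef structure is implied by the hunk rather than shown in it.

static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	/* Page-backed kmalloc()s are still reported without SLUB debugging. */
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
							void *object)
{
	/* Slab objects are reported with the owning cache's flags. */
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
		flags & gfp_allowed_mask);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);
}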
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
  * slab on the node for this slabcache. There are no concurrent accesses
  * possible.
  *
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
 static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
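With the hooks defined for both configurations, the call sites drop their direct kmemleak calls. As a reading aid, this is roughly how the tail of kmalloc_large_node() ends up after the hunk; the earlier lines of the function (local declarations and the compound-page allocation itself) are elided and only described in the comment, they are not part of the hunk.

static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	/* ... local declarations and compound-page allocation elided ... */
	if (page)
		ptr = page_address(page);

	/*
	 * Large allocations bypass the per-cache slab paths entirely, so
	 * kmemleak has to be told about them explicitly here.
	 */
	kmalloc_large_node_hook(ptr, size, flags);
	return ptr;
}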
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
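The free side mirrors the allocation side: kfree() recognises a page-backed allocation by the missing PageSlab flag and, before handing the compound pages back, lets kmemleak forget the object through the new hook. Reassembled from the hunk for readability; the signature comes from the hunk header, and the elided parts are only described from the surrounding kernel source, not shown in the hunk.

void kfree(const void *x)
{
	struct page *page;

	/* ... tracing and early NULL/ZERO_SIZE_PTR checks elided ... */
	page = virt_to_head_page(x);
	if (unlikely(!PageSlab(page))) {
		/* Page-backed kmalloc: no slab involved, free the pages. */
		BUG_ON(!PageCompound(page));
		kfree_hook(x);	/* was a direct kmemleak_free(x) call */
		__free_memcg_kmem_pages(page, compound_order(page));
		return;
	}
	/* ... ordinary slab object: regular slab free path elided ... */
}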
|