|
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kfree_hook(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}