|
@@ -3256,6 +3256,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
|
|
|
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
|
|
|
{
|
|
|
struct kmem_cache *s;
|
|
|
+ void *ret;
|
|
|
|
|
|
if (unlikely(size > PAGE_SIZE))
|
|
|
return kmalloc_large(size, gfpflags);
|
|
@@ -3265,13 +3266,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
|
|
|
if (unlikely(ZERO_OR_NULL_PTR(s)))
|
|
|
return s;
|
|
|
|
|
|
- return slab_alloc(s, gfpflags, -1, caller);
|
|
|
+ ret = slab_alloc(s, gfpflags, -1, caller);
|
|
|
+
|
|
|
+ /* Honor the call site pointer we received. */
|
|
|
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, (unsigned long) caller,
|
|
|
+ ret, size, s->size, gfpflags);
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
|
|
|
int node, unsigned long caller)
|
|
|
{
|
|
|
struct kmem_cache *s;
|
|
|
+ void *ret;
|
|
|
|
|
|
if (unlikely(size > PAGE_SIZE))
|
|
|
return kmalloc_large_node(size, gfpflags, node);
|
|
@@ -3281,7 +3289,14 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
|
|
|
if (unlikely(ZERO_OR_NULL_PTR(s)))
|
|
|
return s;
|
|
|
|
|
|
- return slab_alloc(s, gfpflags, node, caller);
|
|
|
+ ret = slab_alloc(s, gfpflags, node, caller);
|
|
|
+
|
|
|
+ /* Honor the call site pointer we received. */
|
|
|
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
|
|
|
+ (unsigned long) caller, ret,
|
|
|
+ size, s->size, gfpflags, node);
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_SLUB_DEBUG
|