@@ -1891,7 +1891,7 @@ static void free_event(struct perf_event *event)
 
 	if (!event->parent) {
 		atomic_dec(&nr_events);
-		if (event->attr.mmap)
+		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
 			atomic_dec(&nr_comm_events);
@@ -3491,7 +3491,7 @@ perf_event_read_event(struct perf_event *event,
 /*
  * task tracking -- fork/exit
  *
- * enabled by: attr.comm | attr.mmap | attr.task
+ * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
  */
 
 struct perf_task_event {
@@ -3541,7 +3541,8 @@ static int perf_event_task_match(struct perf_event *event)
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
-	if (event->attr.comm || event->attr.mmap || event->attr.task)
+	if (event->attr.comm || event->attr.mmap ||
+	    event->attr.mmap_data || event->attr.task)
 		return 1;
 
 	return 0;
@@ -3766,7 +3767,8 @@ static void perf_event_mmap_output(struct perf_event *event,
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
-				   struct perf_mmap_event *mmap_event)
+				   struct perf_mmap_event *mmap_event,
+				   int executable)
 {
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
@@ -3774,19 +3776,21 @@ static int perf_event_mmap_match(struct perf_event *event,
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
-	if (event->attr.mmap)
+	if ((!executable && event->attr.mmap_data) ||
+	    (executable && event->attr.mmap))
 		return 1;
 
 	return 0;
 }
 
 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
-				  struct perf_mmap_event *mmap_event)
+				  struct perf_mmap_event *mmap_event,
+				  int executable)
 {
 	struct perf_event *event;
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-		if (perf_event_mmap_match(event, mmap_event))
+		if (perf_event_mmap_match(event, mmap_event, executable))
 			perf_event_mmap_output(event, mmap_event);
 	}
 }
@@ -3830,6 +3834,14 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
 			goto got_name;
+		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
+				vma->vm_end >= vma->vm_mm->brk) {
+			name = strncpy(tmp, "[heap]", sizeof(tmp));
+			goto got_name;
+		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
+				vma->vm_end >= vma->vm_mm->start_stack) {
+			name = strncpy(tmp, "[stack]", sizeof(tmp));
+			goto got_name;
 		}
 
 		name = strncpy(tmp, "//anon", sizeof(tmp));
@@ -3846,17 +3858,17 @@ got_name:
 
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
-	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
+	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
-		perf_event_mmap_ctx(ctx, mmap_event);
+		perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
 	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 
 	kfree(buf);
 }
 
-void __perf_event_mmap(struct vm_area_struct *vma)
+void perf_event_mmap(struct vm_area_struct *vma)
 {
 	struct perf_mmap_event mmap_event;
 
@@ -4911,7 +4923,7 @@ done:
 
 	if (!event->parent) {
 		atomic_inc(&nr_events);
-		if (event->attr.mmap)
+		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
 			atomic_inc(&nr_comm_events);
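
For reference, a minimal userspace sketch (not part of this patch) of how the new bit is meant to be consumed: setting attr.mmap_data alongside attr.mmap when opening an event requests PERF_RECORD_MMAP records for non-executable (data, heap, stack) mappings as well as VM_EXEC ones. The event type chosen below is arbitrary, and ring-buffer setup and record parsing are omitted.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* perf_event_open() has no glibc wrapper; invoke the raw syscall. */
static int open_mmap_tracking(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;		/* arbitrary event choice */
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.mmap = 1;				/* executable (VM_EXEC) mappings */
	attr.mmap_data = 1;			/* new with this patch: data mappings too */
	attr.comm = 1;
	attr.task = 1;

	return syscall(__NR_perf_event_open, &attr, pid,
		       -1 /* any cpu */, -1 /* no group */, 0 /* flags */);
}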