
perf_counter: Add PERF_SAMPLE_PERIOD

In order to allow easy tracking of the period, also provide means of
adding it to the sample data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra committed 16 years ago
commit 689802b2d0
2 changed files with 12 additions and 0 deletions:
  include/linux/perf_counter.h (+2 -0)
  kernel/perf_counter.c (+10 -0)

include/linux/perf_counter.h (+2 -0)

@@ -106,6 +106,7 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CALLCHAIN		= 1U << 5,
 	PERF_SAMPLE_ID			= 1U << 6,
 	PERF_SAMPLE_CPU			= 1U << 7,
+	PERF_SAMPLE_PERIOD		= 1U << 8,
 };
 
 /*
@@ -260,6 +261,7 @@ enum perf_event_type {
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u64				time;
+	 *	u64				id;
 	 *	u64				sample_period;
 	 * };
 	 */
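
With PERF_SAMPLE_PERIOD set in a counter's sample_type, each sample record grows by one u64 carrying the counter's current sample period, emitted right after the PERF_SAMPLE_CPU data (see perf_counter_output() in the next file). The following is a hypothetical reader-side sketch, not part of this commit: the helper name and cursor convention are invented for illustration, and only the PERF_SAMPLE_* bits come from this header.

#include <stdint.h>
#include <linux/perf_counter.h>		/* PERF_SAMPLE_* bits */

/*
 * Hypothetical helper: 'p' points just past the PERF_SAMPLE_CPU data of a
 * decoded sample whose counter was opened with 'sample_type'. Returns the
 * advanced cursor; *period is written only when PERF_SAMPLE_PERIOD was set.
 */
static const uint64_t *read_sample_period(const uint64_t *p, uint64_t sample_type,
					  uint64_t *period)
{
	if (sample_type & PERF_SAMPLE_PERIOD)
		*period = *p++;		/* the u64 appended by this patch */
	return p;
}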

kernel/perf_counter.c (+10 -0)

@@ -2404,6 +2404,11 @@ static void perf_counter_output(struct perf_counter *counter,
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
+	if (sample_type & PERF_SAMPLE_PERIOD) {
+		header.type |= PERF_SAMPLE_PERIOD;
+		header.size += sizeof(u64);
+	}
+
 	if (sample_type & PERF_SAMPLE_GROUP) {
 		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
@@ -2445,6 +2450,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
 
+	if (sample_type & PERF_SAMPLE_PERIOD)
+		perf_output_put(&handle, counter->hw.sample_period);
+
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
 	 */
@@ -2835,6 +2843,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 	struct {
 		struct perf_event_header	header;
 		u64				time;
+		u64				id;
 		u64				period;
 	} freq_event = {
 		.header = {
@@ -2843,6 +2852,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 			.size = sizeof(freq_event),
 		},
 		.time = sched_clock(),
+		.id = counter->id,
 		.period = period,
 	};
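
For reference, after this change the period-change record that perf_log_period() emits (presumably the PERF_EVENT_PERIOD entry whose comment is updated in perf_counter.h above) carries the counter id between the timestamp and the new period. A reader-side view might look like the sketch below; the struct name is hypothetical, and the field order simply mirrors freq_event above.

#include <stdint.h>
#include <linux/perf_counter.h>		/* struct perf_event_header */

/* Hypothetical reader-side layout of the period-change record. */
struct period_change_event {
	struct perf_event_header	header;		/* header.size == sizeof(struct period_change_event) */
	uint64_t			time;		/* sched_clock() when the period changed */
	uint64_t			id;		/* counter->id, new in this commit */
	uint64_t			period;		/* the new sample period */
};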