@@ -3351,6 +3351,18 @@ static int perf_event_index(struct perf_event *event)
 	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
 }
 
+static void calc_timer_values(struct perf_event *event,
+				u64 *enabled,
+				u64 *running)
+{
+	u64 now, ctx_time;
+
+	now = perf_clock();
+	ctx_time = event->shadow_ctx_time + now;
+	*enabled = ctx_time - event->tstamp_enabled;
+	*running = ctx_time - event->tstamp_running;
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -3816,7 +3828,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 static void perf_output_read(struct perf_output_handle *handle,
 			     struct perf_event *event)
 {
-	u64 enabled = 0, running = 0, now, ctx_time;
+	u64 enabled = 0, running = 0;
 	u64 read_format = event->attr.read_format;
 
 	/*
@@ -3828,12 +3840,8 @@ static void perf_output_read(struct perf_output_handle *handle,
 	 * because of locking issue as we are called in
 	 * NMI context
 	 */
-	if (read_format & PERF_FORMAT_TOTAL_TIMES) {
-		now = perf_clock();
-		ctx_time = event->shadow_ctx_time + now;
-		enabled = ctx_time - event->tstamp_enabled;
-		running = ctx_time - event->tstamp_running;
-	}
+	if (read_format & PERF_FORMAT_TOTAL_TIMES)
+		calc_timer_values(event, &enabled, &running);
 
 	if (event->attr.read_format & PERF_FORMAT_GROUP)
 		perf_output_read_group(handle, event, enabled, running);
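
For readers who want to see the helper's arithmetic in isolation, here is a minimal user-space sketch of what calc_timer_values() computes. The mock_event struct, mock_clock(), and the sample timestamps below are illustrative stand-ins for the kernel's struct perf_event fields and perf_clock(); only the two subtractions mirror the patch above.

/* Minimal user-space sketch of the calc_timer_values() arithmetic.
 * mock_event, mock_clock(), and the sample values are stand-ins,
 * not kernel code.  Build with: cc -o sketch sketch.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mock_event {			/* stand-in for struct perf_event fields */
	uint64_t shadow_ctx_time;	/* offset converting the raw clock to context time */
	uint64_t tstamp_enabled;	/* context time at which the event was enabled */
	uint64_t tstamp_running;	/* context time at which the event last started running */
};

static uint64_t mock_clock(void)	/* stand-in for perf_clock() */
{
	return 1000;			/* pretend the raw clock reads 1000 time units */
}

static void calc_timer_values(struct mock_event *event,
			      uint64_t *enabled,
			      uint64_t *running)
{
	uint64_t now, ctx_time;

	now = mock_clock();
	ctx_time = event->shadow_ctx_time + now;
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

int main(void)
{
	struct mock_event ev = {
		.shadow_ctx_time = 0,	/* context time == raw clock here */
		.tstamp_enabled  = 100,	/* enabled 900 units before "now" */
		.tstamp_running  = 400,	/* scheduled in 600 units before "now" */
	};
	uint64_t enabled = 0, running = 0;

	calc_timer_values(&ev, &enabled, &running);
	printf("enabled=%" PRIu64 " running=%" PRIu64 "\n", enabled, running);
	return 0;
}

Running the sketch prints enabled=900 running=600. The point of the refactoring itself is that this lockless calculation, which must remain usable from NMI context where ctx->lock cannot be taken, now lives in a single helper that other code paths can reuse.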