@@ -3372,8 +3372,19 @@ void perf_event_update_userpage(struct perf_event *event)
 {
 	struct perf_event_mmap_page *userpg;
 	struct ring_buffer *rb;
+	u64 enabled, running;

 	rcu_read_lock();
+	/*
+	 * compute total_time_enabled, total_time_running
+	 * based on snapshot values taken when the event
+	 * was last scheduled in.
+	 *
+	 * we cannot simply call update_context_time()
+	 * because of locking issues, as we can be called in
+	 * NMI context
+	 */
+	calc_timer_values(event, &enabled, &running);
 	rb = rcu_dereference(event->rb);
 	if (!rb)
 		goto unlock;
@@ -3392,10 +3403,10 @@ void perf_event_update_userpage(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_ACTIVE)
 		userpg->offset -= local64_read(&event->hw.prev_count);

-	userpg->time_enabled = event->total_time_enabled +
+	userpg->time_enabled = enabled +
 			atomic64_read(&event->child_total_time_enabled);

-	userpg->time_running = event->total_time_running +
+	userpg->time_running = running +
 			atomic64_read(&event->child_total_time_running);

 	barrier();
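
For reference, calc_timer_values() itself is not part of this hunk. Below is a minimal sketch of such a lock-free helper, assuming the event carries snapshot fields along the lines of shadow_ctx_time, tstamp_enabled and tstamp_running, and a lock-free clock such as perf_clock(); those names are assumptions taken from the surrounding kernel source of this era, not from this diff. The idea is that both times are derived purely arithmetically from values saved at the last schedule-in, which is what makes the helper usable from NMI context where update_context_time()'s locking is not.

/*
 * Sketch only: compute total_time_enabled / total_time_running from
 * snapshot timestamps taken when the event was last scheduled in.
 * No locks are taken, so this is safe in NMI context. Field and
 * helper names (perf_clock, shadow_ctx_time, tstamp_*) are assumed.
 */
static void calc_timer_values(struct perf_event *event,
			      u64 *enabled,
			      u64 *running)
{
	u64 now, ctx_time;

	now = perf_clock();			 /* current time, lock-free      */
	ctx_time = event->shadow_ctx_time + now; /* context time as of 'now'     */
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

Because only saved timestamps and the current clock are read, no ctx lock is required; that is the property the added comment in the hunk relies on, and it is what lets perf_event_update_userpage() publish fresh time_enabled/time_running values to the mmap page without a read() call.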