@@ -1503,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);