|
@@ -29,13 +29,13 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	int cpu;
 	int pc;
 
-	if (!sched_ref || sched_stopped)
+	if (unlikely(!sched_ref))
 		return;
 
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
 
-	if (!tracer_enabled)
+	if (!tracer_enabled || sched_stopped)
 		return;
 
 	pc = preempt_count();
|
@@ -56,15 +56,15 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 	unsigned long flags;
 	int cpu, pc;
 
-	if (!likely(tracer_enabled))
+	if (unlikely(!sched_ref))
 		return;
 
-	pc = preempt_count();
 	tracing_record_cmdline(current);
 
-	if (sched_stopped)
+	if (!tracer_enabled || sched_stopped)
 		return;
 
+	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];