
ftrace: timestamp syncing, prepare

rename and uninline now() to ftrace_now().

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar, 17 years ago
commit 750ed1a407
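
The uninline is the preparation step: with now() defined as a static inline in trace.h, every tracer translation unit compiled its own copy of the clock read; after this commit all call sites funnel through a single out-of-line ftrace_now(), so a follow-up timestamp-syncing change only has to touch one definition. A minimal standalone sketch of the resulting shape, with a stubbed cpu_clock() and a local notrace definition standing in for the kernel's (both stubs are illustrative, not part of this commit):

#define notrace __attribute__((no_instrument_function))
typedef unsigned long long cycle_t;

/* stand-in for the kernel's per-CPU scheduler clock (nanoseconds);
 * the real cpu_clock() lives in the scheduler, not here */
static cycle_t cpu_clock(int cpu)
{
	(void)cpu;
	return 0;
}

/* trace.h after this commit: one declaration shared by all tracers */
extern notrace cycle_t ftrace_now(int cpu);

/* trace.c after this commit: the single definition a later
 * timestamp-syncing patch can change in one place */
notrace cycle_t ftrace_now(int cpu)
{
	return cpu_clock(cpu);
}

The notrace attribute matters here for the same reason it did on the inline version: the clock helper is called from inside the tracers, so it must stay out of the traced call graph itself.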

+ 2 - 2
kernel/trace/ftrace.c

@@ -531,7 +531,7 @@ static int notrace __ftrace_update_code(void *ignore)
 	save_ftrace_enabled = ftrace_enabled;
 	ftrace_enabled = 0;
 
-	start = now(raw_smp_processor_id());
+	start = ftrace_now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
 
 	/* No locks needed, the machine is stopped! */
@@ -550,7 +550,7 @@ static int notrace __ftrace_update_code(void *ignore)
 
 	}
 
-	stop = now(raw_smp_processor_id());
+	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
 

+ 6 - 1
kernel/trace/trace.c

@@ -42,6 +42,11 @@ ns2usecs(cycle_t nsec)
 	return nsec;
 }
 
+notrace cycle_t ftrace_now(int cpu)
+{
+	return cpu_clock(cpu);
+}
+
 static atomic_t			tracer_counter;
 static struct trace_array	global_trace;
 
@@ -607,7 +612,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 	entry->idx		= atomic_inc_return(&tracer_counter);
 	entry->preempt_count	= pc & 0xff;
 	entry->pid		= tsk->pid;
-	entry->t		= now(raw_smp_processor_id());
+	entry->t		= ftrace_now(raw_smp_processor_id());
 	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |

+ 1 - 4
kernel/trace/trace.h

@@ -171,10 +171,7 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
-static inline notrace cycle_t now(int cpu)
-{
-	return cpu_clock(cpu);
-}
+extern notrace cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_SCHED_TRACER
 extern void notrace

+ 1 - 1
kernel/trace/trace_functions.c

@@ -20,7 +20,7 @@ static notrace void function_reset(struct trace_array *tr)
 {
 	int cpu;
 
-	tr->time_start = now(tr->cpu);
+	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
 		tracing_reset(tr->data[cpu]);

+ 3 - 3
kernel/trace/trace_irqsoff.c

@@ -136,7 +136,7 @@ check_critical_timing(struct trace_array *tr,
 	 * as long as possible:
 	 */
 	T0 = data->preempt_timestamp;
-	T1 = now(cpu);
+	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
 	local_save_flags(flags);
@@ -186,7 +186,7 @@ out_unlock:
 
 out:
 	data->critical_sequence = max_sequence;
-	data->preempt_timestamp = now(cpu);
+	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(data);
 	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
@@ -215,7 +215,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	data->critical_sequence = max_sequence;
-	data->preempt_timestamp = now(cpu);
+	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 	tracing_reset(data);
 

+ 1 - 1
kernel/trace/trace_sched_switch.c

@@ -61,7 +61,7 @@ static notrace void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
 
-	tr->time_start = now(tr->cpu);
+	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
 		tracing_reset(tr->data[cpu]);

+ 2 - 2
kernel/trace/trace_sched_wakeup.c

@@ -92,7 +92,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
 	 * as long as possible:
 	 */
 	T0 = data->preempt_timestamp;
-	T1 = now(cpu);
+	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
 	if (!report_latency(delta))
@@ -191,7 +191,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 
 	local_save_flags(flags);
 
-	tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
+	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
 	ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked: