Browse source code

ftrace: insert in the ftrace_preempt_disable()/enable() functions

Impact: use new, consolidated APIs in ftrace plugins

This patch replaces the schedule safe preempt disable code with the
ftrace_preempt_disable() and ftrace_preempt_enable() safe functions.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
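
[Editor's note] For readers not familiar with the new helpers: they are not defined in this patch (ring_buffer.c merely gains an #include "trace.h" to pick them up), but judging from the open-coded sequences removed below they amount to the following sketch. The helper names and the notrace preempt primitives are taken from the diff itself; the exact definitions in the shared tracer header may differ slightly.

/*
 * Sketch of the consolidated helpers, reconstructed from the
 * open-coded pattern this patch removes. Needs <linux/sched.h>
 * for need_resched() and <linux/preempt.h> for the *_notrace
 * preempt primitives.
 */

/*
 * Disable preemption without recursing into the function tracer,
 * and remember whether a reschedule was already pending so the
 * matching enable does not call back into the scheduler.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/*
 * Re-enable preemption; if a reschedule was pending before the
 * disable, skip the resched check so the tracer cannot recurse
 * into schedule().
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

Each call site below then collapses the need_resched()/preempt_disable_notrace() pair into resched = ftrace_preempt_disable(), and the matching if/else re-enable into ftrace_preempt_enable(resched).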
Steven Rostedt 16 years ago
parent
commit
182e9f5f70
4 changed files with 15 additions and 41 deletions
  1. kernel/trace/ring_buffer.c (+9, -18)
  2. kernel/trace/trace.c (+2, -6)
  3. kernel/trace/trace_sched_wakeup.c (+2, -11)
  4. kernel/trace/trace_stack.c (+2, -6)

+ 9 - 18
kernel/trace/ring_buffer.c

@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 
  out:
-	if (resched)
-		preempt_enable_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 	return NULL;
 }
 
@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
-	if (preempt_count() == 1) {
-		if (per_cpu(rb_need_resched, cpu))
-			preempt_enable_no_resched_notrace();
-		else
-			preempt_enable_notrace();
-	} else
+	if (preempt_count() == 1)
+		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+	else
 		preempt_enable_no_resched_notrace();
 
 	return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 
 	return ret;
 }
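
[Editor's note] On the ring_buffer_unlock_commit() hunk above: unlike the other call sites, the commit path has no local resched flag of its own, because the reservation and the commit can nest. The reserve side (unchanged by this patch, so not shown in the diff) stashes the flag in the rb_need_resched per-cpu variable for the outermost caller only, roughly as sketched below; the exact code in ring_buffer_lock_reserve() may differ.

	/*
	 * Sketch of the reserve-side bookkeeping assumed by the hunk
	 * above: only the outermost reservation (preempt_count() == 1)
	 * records whether a reschedule was pending, so that the final
	 * ring_buffer_unlock_commit() can hand it to ftrace_preempt_enable().
	 */
	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;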

+ 2 - 6
kernel/trace/trace.c

@@ -904,8 +904,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -915,10 +914,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =

+ 2 - 11
kernel/trace/trace_sched_wakeup.c

@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
 	atomic_dec(&data->disabled);
 
-	/*
-	 * To prevent recursion from the scheduler, if the
-	 * resched flag was set before we entered, then
-	 * don't reschedule.
-	 */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =

+ 2 - 6
kernel/trace/trace_stack.c

@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =