@@ -1193,6 +1193,18 @@ void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+					    struct ring_buffer_event *event,
+					    unsigned long flags, int pc,
+					    struct pt_regs *regs)
+{
+	ring_buffer_unlock_commit(buffer, event);
+
+	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+	ftrace_trace_userstack(buffer, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
 {
@@ -1238,7 +1250,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
-				 int skip, int pc)
+				 int skip, int pc, struct pt_regs *regs)
 {
 	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
@@ -1257,24 +1269,36 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	trace.skip		= skip;
 	trace.entries		= entry->caller;
 
-	save_stack_trace(&trace);
+	if (regs)
+		save_stack_trace_regs(regs, &trace);
+	else
+		save_stack_trace(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 }
 
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+			     int skip, int pc, struct pt_regs *regs)
+{
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
+
+	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
+}
+
 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
 			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(buffer, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
 }
 
 /**
@@ -1290,7 +1314,7 @@ void trace_dump_stack(void)
 	local_save_flags(flags);
 
 	/* skipping 3 traces, seems to get us at the caller of this function */
-	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
 }
 
 static DEFINE_PER_CPU(int, user_stack_count);
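
For context, a minimal caller sketch (not part of the patch): the point of the new *_regs variants above is that a handler which already holds a reserved ring-buffer event, and which has a struct pt_regs describing the probed context, can commit the event so that any recorded kernel stack is unwound from those registers rather than from the tracer's own call chain. The function name example_commit_with_regs below is hypothetical; trace_nowake_buffer_unlock_commit_regs() is the helper introduced by this patch, and local_save_flags()/preempt_count() are the usual kernel primitives.

	/* Hypothetical caller, assuming 'event' was already reserved on 'buffer'. */
	static void example_commit_with_regs(struct ring_buffer *buffer,
					     struct ring_buffer_event *event,
					     struct pt_regs *regs)
	{
		unsigned long irq_flags;

		local_save_flags(irq_flags);

		/*
		 * Commit the event and, when the stacktrace option is set,
		 * record the kernel stack starting from the context in 'regs'
		 * (falls back to the current stack when regs is NULL).
		 */
		trace_nowake_buffer_unlock_commit_regs(buffer, event, irq_flags,
						       preempt_count(), regs);
	}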