@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
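
The patch guards ftrace_trace_userstack() against re-entering itself: with preemption disabled (so the task cannot migrate CPUs between the check and the increment), a per-CPU counter is tested, and the trace is skipped if it is already non-zero. The same idea can be sketched in ordinary user-space C. Everything below is a hypothetical illustration, not part of the kernel patch: a _Thread_local counter stands in for DEFINE_PER_CPU, the record_user_stack()/emit_event() names are invented, and since user space has no preempt_disable(), the thread-local flag alone plays that role.

#include <stdio.h>

/* Thread-local stand-in for the patch's per-CPU user_stack_count. */
static _Thread_local int user_stack_count;

static void record_user_stack(void);

/* Stand-in for writing a trace event; in the kernel this path may itself
 * trigger further events, which is what the guard protects against. */
static void emit_event(const char *what)
{
	printf("event: %s\n", what);
	record_user_stack();	/* deliberate re-entry to exercise the guard */
}

static void record_user_stack(void)
{
	if (user_stack_count)	/* already tracing on this thread: bail out */
		return;
	user_stack_count++;	/* mirrors __this_cpu_inc(user_stack_count) */

	emit_event("user stack sample");

	user_stack_count--;	/* mirrors __this_cpu_dec(user_stack_count) */
}

int main(void)
{
	record_user_stack();
	return 0;
}

Running this prints a single event line: the nested call returns immediately because the per-thread counter is already set, which is the behaviour the patch wants from its per-CPU counter.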