@@ -1313,12 +1313,10 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	__this_cpu_inc(user_stack_count);
 
-
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		goto out_drop_count;
 	entry	= ring_buffer_event_data(event);
 
 	entry->tgid		= current->tgid;
@@ -1333,8 +1331,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
+ out_drop_count:
 	__this_cpu_dec(user_stack_count);
-
  out:
 	preempt_enable();
 }