@@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+	if (!ftrace_file)
+		return;
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -336,8 +343,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	if (!call_filter_check_discard(sys_data->enter_event, entry,
-				       buffer, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -345,6 +351,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -356,7 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+	if (!ftrace_file)
+		return;
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -377,8 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	if (!call_filter_check_discard(sys_data->exit_event, entry,
-				       buffer, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -397,7 +409,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_enter_syscalls);
+		rcu_assign_pointer(tr->enter_syscall_files[num], file);
 		tr->sys_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
@@ -415,10 +427,15 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_enter--;
-	clear_bit(num, tr->enabled_enter_syscalls);
+	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -435,7 +452,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_exit_syscalls);
+		rcu_assign_pointer(tr->exit_syscall_files[num], file);
 		tr->sys_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
@@ -453,10 +470,15 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_exit--;
-	clear_bit(num, tr->enabled_exit_syscalls);
+	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
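
Note: the hunks above replace the per-trace_array enable bitmaps
(enabled_enter_syscalls / enabled_exit_syscalls) with RCU-protected pointers
to the event's ftrace_event_file, which lets the handlers honor the
SOFT_DISABLED flag and do per-file filtering. The lifecycle is the standard
sched-RCU publish/read/retire pattern. Below is a minimal sketch of that
pattern, not code from this patch; the names example_files, example_enable,
example_handler and example_disable are hypothetical:

#include <linux/rcupdate.h>
#include <asm/unistd.h>		/* NR_syscalls, arch-defined */

static struct ftrace_event_file __rcu *example_files[NR_syscalls];

/* Publish: make the file visible to handlers. The real patch does this
 * under syscall_trace_lock so register/unregister cannot race. */
static void example_enable(int num, struct ftrace_event_file *file)
{
	rcu_assign_pointer(example_files[num], file);
}

/* Read side: tracepoint handlers already run inside rcu_read_lock_sched()
 * (see __DO_TRACE()), so a sched-RCU dereference is safe with no extra
 * locking in the syscall fast path. */
static void example_handler(int num)
{
	struct ftrace_event_file *file;

	file = rcu_dereference_sched(example_files[num]);
	if (!file)
		return;	/* event not enabled for this trace instance */
	/* ... record the event via file ... */
}

/* Retire: hide the file, then wait for all in-flight handlers, so the
 * caller may free or reuse the file once this returns. */
static void example_disable(int num)
{
	rcu_assign_pointer(example_files[num], NULL);
	synchronize_sched();
}

On kernels of this era, synchronize_sched() returns only after every CPU has
left the preempt-disabled region that rcu_read_lock_sched() maps to (the
primitive was later folded into synchronize_rcu()); that is what lets
unreg_event_syscall_enter()/..._exit() promise their callers that no handler
still references the file when they return.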