|
@@ -1,15 +1,16 @@
|
|
|
#include <trace/syscall.h>
|
|
|
#include <linux/kernel.h>
|
|
|
+#include <linux/ftrace.h>
|
|
|
#include <asm/syscall.h>
|
|
|
|
|
|
#include "trace_output.h"
|
|
|
#include "trace.h"
|
|
|
|
|
|
-/* Keep a counter of the syscall tracing users */
|
|
|
-static int refcount;
|
|
|
-
|
|
|
-/* Prevent from races on thread flags toggling */
|
|
|
static DEFINE_MUTEX(syscall_trace_lock);
|
|
|
+static int sys_refcount_enter;
|
|
|
+static int sys_refcount_exit;
|
|
|
+static DECLARE_BITMAP(enabled_enter_syscalls, FTRACE_SYSCALL_MAX);
|
|
|
+static DECLARE_BITMAP(enabled_exit_syscalls, FTRACE_SYSCALL_MAX);
|
|
|
|
|
|
/* Option to display the parameters types */
|
|
|
enum {
|
|
@@ -95,53 +96,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
|
|
|
return TRACE_TYPE_HANDLED;
|
|
|
}
|
|
|
|
|
|
-void start_ftrace_syscalls(void)
|
|
|
-{
|
|
|
- unsigned long flags;
|
|
|
- struct task_struct *g, *t;
|
|
|
-
|
|
|
- mutex_lock(&syscall_trace_lock);
|
|
|
-
|
|
|
- /* Don't enable the flag on the tasks twice */
|
|
|
- if (++refcount != 1)
|
|
|
- goto unlock;
|
|
|
-
|
|
|
- read_lock_irqsave(&tasklist_lock, flags);
|
|
|
-
|
|
|
- do_each_thread(g, t) {
|
|
|
- set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
|
|
|
- } while_each_thread(g, t);
|
|
|
-
|
|
|
- read_unlock_irqrestore(&tasklist_lock, flags);
|
|
|
-
|
|
|
-unlock:
|
|
|
- mutex_unlock(&syscall_trace_lock);
|
|
|
-}
|
|
|
-
|
|
|
-void stop_ftrace_syscalls(void)
|
|
|
-{
|
|
|
- unsigned long flags;
|
|
|
- struct task_struct *g, *t;
|
|
|
-
|
|
|
- mutex_lock(&syscall_trace_lock);
|
|
|
-
|
|
|
- /* There are perhaps still some users */
|
|
|
- if (--refcount)
|
|
|
- goto unlock;
|
|
|
-
|
|
|
- read_lock_irqsave(&tasklist_lock, flags);
|
|
|
-
|
|
|
- do_each_thread(g, t) {
|
|
|
- clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
|
|
|
- } while_each_thread(g, t);
|
|
|
-
|
|
|
- read_unlock_irqrestore(&tasklist_lock, flags);
|
|
|
-
|
|
|
-unlock:
|
|
|
- mutex_unlock(&syscall_trace_lock);
|
|
|
-}
|
|
|
-
|
|
|
-void ftrace_syscall_enter(struct pt_regs *regs)
|
|
|
+void ftrace_syscall_enter(struct pt_regs *regs, long id)
|
|
|
{
|
|
|
struct syscall_trace_enter *entry;
|
|
|
struct syscall_metadata *sys_data;
|
|
@@ -150,6 +105,8 @@ void ftrace_syscall_enter(struct pt_regs *regs)
|
|
|
int syscall_nr;
|
|
|
|
|
|
syscall_nr = syscall_get_nr(current, regs);
|
|
|
+ if (!test_bit(syscall_nr, enabled_enter_syscalls))
|
|
|
+ return;
|
|
|
|
|
|
sys_data = syscall_nr_to_meta(syscall_nr);
|
|
|
if (!sys_data)
|
|
@@ -170,7 +127,7 @@ void ftrace_syscall_enter(struct pt_regs *regs)
|
|
|
trace_wake_up();
|
|
|
}
|
|
|
|
|
|
-void ftrace_syscall_exit(struct pt_regs *regs)
|
|
|
+void ftrace_syscall_exit(struct pt_regs *regs, long ret)
|
|
|
{
|
|
|
struct syscall_trace_exit *entry;
|
|
|
struct syscall_metadata *sys_data;
|
|
@@ -178,6 +135,8 @@ void ftrace_syscall_exit(struct pt_regs *regs)
|
|
|
int syscall_nr;
|
|
|
|
|
|
syscall_nr = syscall_get_nr(current, regs);
|
|
|
+ if (!test_bit(syscall_nr, enabled_exit_syscalls))
|
|
|
+ return;
|
|
|
|
|
|
sys_data = syscall_nr_to_meta(syscall_nr);
|
|
|
if (!sys_data)
|
|
@@ -196,54 +155,94 @@ void ftrace_syscall_exit(struct pt_regs *regs)
|
|
|
trace_wake_up();
|
|
|
}
|
|
|
|
|
|
-static int init_syscall_tracer(struct trace_array *tr)
|
|
|
+int reg_event_syscall_enter(void *ptr)
|
|
|
{
|
|
|
- start_ftrace_syscalls();
|
|
|
-
|
|
|
- return 0;
|
|
|
+ int ret = 0;
|
|
|
+ int num;
|
|
|
+ char *name;
|
|
|
+
|
|
|
+ name = (char *)ptr;
|
|
|
+ num = syscall_name_to_nr(name);
|
|
|
+ if (num < 0 || num >= FTRACE_SYSCALL_MAX)
|
|
|
+ return -ENOSYS;
|
|
|
+ mutex_lock(&syscall_trace_lock);
|
|
|
+ if (!sys_refcount_enter)
|
|
|
+ ret = register_trace_syscall_enter(ftrace_syscall_enter);
|
|
|
+ if (ret) {
|
|
|
+ pr_info("event trace: Could not activate "
|
|
|
+ "syscall entry trace point");
|
|
|
+ } else {
|
|
|
+ set_bit(num, enabled_enter_syscalls);
|
|
|
+ sys_refcount_enter++;
|
|
|
+ }
|
|
|
+ mutex_unlock(&syscall_trace_lock);
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
-static void reset_syscall_tracer(struct trace_array *tr)
|
|
|
+void unreg_event_syscall_enter(void *ptr)
|
|
|
{
|
|
|
- stop_ftrace_syscalls();
|
|
|
- tracing_reset_online_cpus(tr);
|
|
|
-}
|
|
|
-
|
|
|
-static struct trace_event syscall_enter_event = {
|
|
|
- .type = TRACE_SYSCALL_ENTER,
|
|
|
- .trace = print_syscall_enter,
|
|
|
-};
|
|
|
-
|
|
|
-static struct trace_event syscall_exit_event = {
|
|
|
- .type = TRACE_SYSCALL_EXIT,
|
|
|
- .trace = print_syscall_exit,
|
|
|
-};
|
|
|
+ int num;
|
|
|
+ char *name;
|
|
|
|
|
|
-static struct tracer syscall_tracer __read_mostly = {
|
|
|
- .name = "syscall",
|
|
|
- .init = init_syscall_tracer,
|
|
|
- .reset = reset_syscall_tracer,
|
|
|
- .flags = &syscalls_flags,
|
|
|
-};
|
|
|
+ name = (char *)ptr;
|
|
|
+ num = syscall_name_to_nr(name);
|
|
|
+ if (num < 0 || num >= FTRACE_SYSCALL_MAX)
|
|
|
+ return;
|
|
|
+ mutex_lock(&syscall_trace_lock);
|
|
|
+ sys_refcount_enter--;
|
|
|
+ clear_bit(num, enabled_enter_syscalls);
|
|
|
+ if (!sys_refcount_enter)
|
|
|
+ unregister_trace_syscall_enter(ftrace_syscall_enter);
|
|
|
+ mutex_unlock(&syscall_trace_lock);
|
|
|
+}
|
|
|
|
|
|
-__init int register_ftrace_syscalls(void)
|
|
|
+int reg_event_syscall_exit(void *ptr)
|
|
|
{
|
|
|
- int ret;
|
|
|
-
|
|
|
- ret = register_ftrace_event(&syscall_enter_event);
|
|
|
- if (!ret) {
|
|
|
- printk(KERN_WARNING "event %d failed to register\n",
|
|
|
- syscall_enter_event.type);
|
|
|
- WARN_ON_ONCE(1);
|
|
|
+ int ret = 0;
|
|
|
+ int num;
|
|
|
+ char *name;
|
|
|
+
|
|
|
+ name = (char *)ptr;
|
|
|
+ num = syscall_name_to_nr(name);
|
|
|
+ if (num < 0 || num >= FTRACE_SYSCALL_MAX)
|
|
|
+ return -ENOSYS;
|
|
|
+ mutex_lock(&syscall_trace_lock);
|
|
|
+ if (!sys_refcount_exit)
|
|
|
+ ret = register_trace_syscall_exit(ftrace_syscall_exit);
|
|
|
+ if (ret) {
|
|
|
+ pr_info("event trace: Could not activate "
|
|
|
+ "syscall exit trace point");
|
|
|
+ } else {
|
|
|
+ set_bit(num, enabled_exit_syscalls);
|
|
|
+ sys_refcount_exit++;
|
|
|
}
|
|
|
+ mutex_unlock(&syscall_trace_lock);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
|
|
|
- ret = register_ftrace_event(&syscall_exit_event);
|
|
|
- if (!ret) {
|
|
|
- printk(KERN_WARNING "event %d failed to register\n",
|
|
|
- syscall_exit_event.type);
|
|
|
- WARN_ON_ONCE(1);
|
|
|
- }
|
|
|
+void unreg_event_syscall_exit(void *ptr)
|
|
|
+{
|
|
|
+ int num;
|
|
|
+ char *name;
|
|
|
|
|
|
- return register_tracer(&syscall_tracer);
|
|
|
+ name = (char *)ptr;
|
|
|
+ num = syscall_name_to_nr(name);
|
|
|
+ if (num < 0 || num >= FTRACE_SYSCALL_MAX)
|
|
|
+ return;
|
|
|
+ mutex_lock(&syscall_trace_lock);
|
|
|
+ sys_refcount_exit--;
|
|
|
+ clear_bit(num, enabled_exit_syscalls);
|
|
|
+ if (!sys_refcount_exit)
|
|
|
+ unregister_trace_syscall_exit(ftrace_syscall_exit);
|
|
|
+ mutex_unlock(&syscall_trace_lock);
|
|
|
}
|
|
|
-device_initcall(register_ftrace_syscalls);
|
|
|
+
|
|
|
+struct trace_event event_syscall_enter = {
|
|
|
+ .trace = print_syscall_enter,
|
|
|
+ .type = TRACE_SYSCALL_ENTER
|
|
|
+};
|
|
|
+
|
|
|
+struct trace_event event_syscall_exit = {
|
|
|
+ .trace = print_syscall_exit,
|
|
|
+ .type = TRACE_SYSCALL_EXIT
|
|
|
+};
|