trace_syscalls.c
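/*
 * trace_syscalls.c - ftrace-based tracer for syscall entry and exit.
 *
 * Typical usage sketch (paths assume debugfs is mounted on /sys/kernel/debug;
 * the exact option interface may differ by kernel version):
 *
 *      echo syscall > /sys/kernel/debug/tracing/current_tracer
 *      echo syscall_arg_type > /sys/kernel/debug/tracing/trace_options
 *      cat /sys/kernel/debug/tracing/trace
 *
 * The syscall_arg_type tracer option additionally prints the C type of
 * each syscall argument.
 */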

#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static atomic_t refcount;

/* Option to display the syscall argument types */
enum {
        TRACE_SYSCALLS_OPT_TYPES = 0x1,
};

static struct tracer_opt syscalls_opts[] = {
        { TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
        { }
};

static struct tracer_flags syscalls_flags = {
        .val  = 0, /* By default: no args types */
        .opts = syscalls_opts
};
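/*
 * Output one syscall entry event as "name(arg: value, ..., arg: value)".
 * When the syscall_arg_type option is set, each argument value is also
 * prefixed with its C type.
 */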
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace_assign_type(trace, ent);

        syscall = trace->nr;

        entry = syscall_nr_to_meta(syscall);
        if (!entry)
                goto end;

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? ")" : ",");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

end:
        trace_seq_printf(s, "\n");
        return TRACE_TYPE_HANDLED;
}
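/*
 * Output one syscall exit event as "name -> 0x<return value>".
 */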
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace_assign_type(trace, ent);

        syscall = trace->nr;

        entry = syscall_nr_to_meta(syscall);
        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
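/*
 * Turn syscall tracing on: set TIF_SYSCALL_FTRACE on every thread so that
 * the syscall slow path calls into ftrace_syscall_enter()/exit().
 * Reference counted so several users can be active at once.
 */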
void start_ftrace_syscalls(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        /* Don't enable the flag on the tasks twice */
        if (atomic_inc_return(&refcount) != 1)
                return;

        arch_init_ftrace_syscalls();
        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, t) {
                set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
        } while_each_thread(g, t);

        read_unlock_irqrestore(&tasklist_lock, flags);
}
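/*
 * Turn syscall tracing back off once the last user is gone: clear
 * TIF_SYSCALL_FTRACE on every thread.
 */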
void stop_ftrace_syscalls(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        /* There are perhaps still some users */
        if (atomic_dec_return(&refcount))
                return;

        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, t) {
                clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
        } while_each_thread(g, t);

        read_unlock_irqrestore(&tasklist_lock, flags);
}
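/*
 * Syscall entry hook: record the syscall number and its arguments
 * into the ring buffer.
 */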
void ftrace_syscall_enter(struct pt_regs *regs)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        int size;
        int syscall_nr;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        cpu = raw_smp_processor_id();

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
                                                  0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        trace_current_buffer_unlock_commit(event, 0, 0);
        trace_wake_up();
}
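/*
 * Syscall exit hook: record the syscall number and its return value
 * into the ring buffer.
 */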
void ftrace_syscall_exit(struct pt_regs *regs)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        int syscall_nr;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        cpu = raw_smp_processor_id();

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
                                                  sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        trace_current_buffer_unlock_commit(event, 0, 0);
        trace_wake_up();
}
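/*
 * Tracer glue: start syscall tracing when the "syscall" tracer is selected
 * and stop it (and reset the buffers) when it is switched away.
 */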
static int init_syscall_tracer(struct trace_array *tr)
{
        start_ftrace_syscalls();

        return 0;
}

static void reset_syscall_tracer(struct trace_array *tr)
{
        stop_ftrace_syscalls();
        tracing_reset_online_cpus(tr);
}

static struct trace_event syscall_enter_event = {
        .type   = TRACE_SYSCALL_ENTER,
        .trace  = print_syscall_enter,
};

static struct trace_event syscall_exit_event = {
        .type   = TRACE_SYSCALL_EXIT,
        .trace  = print_syscall_exit,
};

static struct tracer syscall_tracer __read_mostly = {
        .name   = "syscall",
        .init   = init_syscall_tracer,
        .reset  = reset_syscall_tracer,
        .flags  = &syscalls_flags,
};
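/*
 * Boot-time registration of the entry/exit output events and the
 * "syscall" tracer itself.
 */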
__init int register_ftrace_syscalls(void)
{
        int ret;

        ret = register_ftrace_event(&syscall_enter_event);
        if (!ret) {
                printk(KERN_WARNING "event %d failed to register\n",
                       syscall_enter_event.type);
                WARN_ON_ONCE(1);
        }

        ret = register_ftrace_event(&syscall_exit_event);
        if (!ret) {
                printk(KERN_WARNING "event %d failed to register\n",
                       syscall_exit_event.type);
                WARN_ON_ONCE(1);
        }

        return register_tracer(&syscall_tracer);
}
device_initcall(register_ftrace_syscalls);