trace_syscalls.c
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>       /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>
#include <asm/asm-offsets.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
                                  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
                                 enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        return &entry->enter_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
        .trace          = print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
        .trace          = print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
        .system         = "syscalls",
        .reg            = syscall_enter_register,
        .define_fields  = syscall_enter_define_fields,
        .get_fields     = syscall_get_enter_fields,
        .raw_init       = init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
        .system         = "syscalls",
        .reg            = syscall_exit_register,
        .define_fields  = syscall_exit_define_fields,
        .fields         = LIST_HEAD_INIT(event_class_syscall_exit.fields),
        .raw_init       = init_syscall_trace,
};

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;
#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
        /*
         * Only compare after the "sys" prefix. Archs that use
         * syscall wrappers may alias their syscall symbols with a
         * "SyS" prefix instead of "sys", leading to an unwanted
         * mismatch.
         */
        return !strcmp(sym + 3, name + 3);
}
#endif
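
/*
 * Map a syscall handler address to its compile-time metadata: resolve the
 * symbol name through kallsyms, then scan the __syscalls_metadata section
 * for a matching entry. Returns NULL for unimplemented syscalls
 * (sys_ni_syscall).
 */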
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata **start;
        struct syscall_metadata **stop;
        char str[KSYM_SYMBOL_LEN];

        start = __start_syscalls_metadata;
        stop = __stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
                return NULL;

        for ( ; start < stop; start++) {
                if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
                        return *start;
        }
        return NULL;
}
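
/* Bounds-checked lookup of the metadata entry for a syscall number. */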
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}
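
/*
 * Text output for a syscall entry record: print the syscall name and each
 * argument as "name: value", with argument types prepended in verbose mode.
 */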
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
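
/*
 * Text output for a syscall exit record: print "name -> return value".
 */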
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)                                       \
        sizeof(type) != sizeof(trace.name) ?                            \
                __bad_type_size() :                                     \
                #type, #name, offsetof(typeof(trace), name),            \
                sizeof(trace.name), is_signed_type(type)
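
/*
 * Build the print_fmt string exported through the event's format file.
 * __set_enter_print_fmt() is called twice: once with a NULL buffer to size
 * the string, then again to fill the allocated buffer.
 */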
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
        int i;
        int pos = 0;

        /* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
                                entry->args[i], sizeof(unsigned long),
                                i == entry->nb_args - 1 ? "" : ", ");
        }
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO,
                                ", ((unsigned long)(REC->%s))", entry->args[i]);
        }

#undef LEN_OR_ZERO

        /* return the length of print_fmt */
        return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
        char *print_fmt;
        int len;
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event != call) {
                call->print_fmt = "\"0x%lx\", REC->ret";
                return 0;
        }

        /* First: called with 0 length to calculate the needed length */
        len = __set_enter_print_fmt(entry, NULL, 0);

        print_fmt = kmalloc(len + 1, GFP_KERNEL);
        if (!print_fmt)
                return -ENOMEM;

        /* Second: actually write the @print_fmt */
        __set_enter_print_fmt(entry, print_fmt, len + 1);
        call->print_fmt = print_fmt;

        return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event == call)
                kfree(call->print_fmt);
}
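
/*
 * Describe the record layout to the event filter code: the syscall number,
 * then (for enter events only) one unsigned long per argument.
 */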
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int ret;
        int i;
        int offset = offsetof(typeof(trace), args);

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}
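
/*
 * sys_enter tracepoint probe: record the syscall number and its arguments
 * into the ftrace ring buffer for every enabled syscall.
 */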
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->enter_event->event.type, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
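
/*
 * sys_exit tracepoint probe: record the syscall number and return value
 * into the ftrace ring buffer for every enabled syscall.
 */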
void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
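
/*
 * Per-event enable/disable callbacks. The first enabled event registers the
 * shared tracepoint probe and the last disabled event unregisters it; the
 * refcounts and enable bitmaps are protected by syscall_trace_lock.
 */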
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
        if (!ret) {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
        if (!ret) {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}
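
/*
 * raw_init callback for every syscall event: build print_fmt and register
 * the trace event, bailing out for syscalls without mapped metadata.
 */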
int init_syscall_trace(struct ftrace_event_call *call)
{
        int id;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls) {
                pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
                         ((struct syscall_metadata *)call->data)->name);
                return -ENOSYS;
        }

        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;

        id = trace_event_raw_init(call);
        if (id < 0) {
                free_syscall_print_fmt(call);
                return id;
        }

        return id;
}

unsigned long __init __weak arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr];
}
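
/*
 * Early initialization: walk the syscall table and build the
 * syscall-number to metadata mapping used by all the lookups above.
 */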
int __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                    NR_syscalls, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;
                syscalls_metadata[i] = meta;
        }

        return 0;
}
core_initcall(init_ftrace_syscalls);
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
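
/*
 * perf variant of the sys_enter probe: write the record into the perf
 * buffer, padding its size to a u64 boundary as perf requires.
 */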
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = syscall_get_nr(current, regs);
        /* Ignore invalid syscall numbers, as the ftrace handlers do */
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return;

        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                                sys_data->enter_event->event.type, regs, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);

        head = this_cpu_ptr(sys_data->enter_event->perf_events);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_enter)
                ret = register_trace_sys_enter(perf_syscall_enter, NULL);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point");
        } else {
                set_bit(num, enabled_perf_enter_syscalls);
                sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
void perf_sysenter_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_enter--;
        clear_bit(num, enabled_perf_enter_syscalls);
        if (!sys_perf_refcount_enter)
                unregister_trace_sys_enter(perf_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}
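
/*
 * perf variant of the sys_exit probe: record the syscall number and
 * return value into the perf buffer.
 */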
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = syscall_get_nr(current, regs);
        /* Ignore invalid syscall numbers, as the ftrace handlers do */
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid about the future; ideally this
         * check would happen at build time.
         */
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "exit event has grown above perf buffer size"))
                return;

        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                                sys_data->exit_event->event.type, regs, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        head = this_cpu_ptr(sys_data->exit_event->perf_events);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_exit)
                ret = register_trace_sys_exit(perf_syscall_exit, NULL);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point");
        } else {
                set_bit(num, enabled_perf_exit_syscalls);
                sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
void perf_sysexit_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_exit--;
        clear_bit(num, enabled_perf_exit_syscalls);
        if (!sys_perf_refcount_exit)
                unregister_trace_sys_exit(perf_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
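
/*
 * Dispatch the generic trace_reg requests from the event core to the
 * ftrace or perf enable/disable helpers above.
 */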
static int syscall_enter_register(struct ftrace_event_call *event,
                                  enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_enter(event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_enter(event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysenter_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysenter_disable(event);
                return 0;
#endif
        }
        return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
                                 enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_exit(event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_exit(event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysexit_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysexit_disable(event);
                return 0;
#endif
        }
        return 0;
}