trace_syscalls.c

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>
#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};
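/*
 * Bounds of the syscalls metadata section, populated by the syscall
 * definition macros (SYSCALL_METADATA() and friends) and used below to
 * map syscall numbers to their metadata entries.
 */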
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
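/*
 * SYSCALL_FIELD() expands to the (type, name, offset, size, signedness)
 * arguments expected by trace_define_field(). If the declared type does
 * not match the size of the field in the trace record, the surviving
 * reference to the undefined __bad_type_size() makes the build fail.
 */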
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}
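/*
 * Probes hooked to the sys_enter/sys_exit tracepoints: for syscalls that
 * are enabled in the bitmaps above, record the syscall number plus its
 * arguments (enter) or return value (exit) into the ftrace ring buffer.
 */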
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
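/*
 * Registration helpers: the sys_enter/sys_exit tracepoint probes are shared
 * by all syscall events, so they are registered when the first event is
 * enabled and unregistered when the last one goes away. The per-syscall
 * bitmaps select which syscalls actually get recorded.
 */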
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}
int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
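/*
 * perf variants of the probes: the record is built in the per-CPU perf
 * trace buffer and submitted to the perf events attached to this syscall.
 */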
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	/* guard against a negative syscall number before indexing the bitmap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	/* guard against a negative syscall number before indexing the bitmap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
#endif
	}
	return 0;
}