trace_syscalls.c

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

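/*
 * Note on the two class definitions below: enter events carry a
 * different argument list per syscall, so each event fetches its field
 * list from its own syscall_metadata via ->get_fields above.  Exit
 * events all share the same two fields (nr, ret), so the exit class
 * can use one static ->fields list.
 */
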
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

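/*
 * Illustration (not from the source): on an arch with syscall
 * wrappers, kallsyms may resolve a table entry to "SyS_read" while the
 * metadata records "sys_read".  Skipping the first three bytes
 * compares "_read" against "_read", so both spellings match.
 */
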
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

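/*
 * Example output (illustrative, assuming a "sys_read"-style metadata
 * name and three arguments, each printed as bare hex via %lx):
 *
 *   sys_read(fd: 3, buf: 7f1c2e4000, count: 1000)
 *
 * With TRACE_ITER_VERBOSE set, each value is preceded by its C type.
 */
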
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

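/*
 * The conditional above is a build-time type check: if the declared
 * type does not match the struct member's size, the expression calls
 * the deliberately undefined __bad_type_size() and the link fails.
 * Otherwise, SYSCALL_FIELD(int, nr) expands to the argument list that
 * trace_define_field() expects:
 *
 *   "int", "nr", offsetof(typeof(trace), nr), sizeof(trace.nr),
 *   is_signed_type(int)
 */
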
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

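/*
 * Illustration (not from the source): for a two-argument syscall with
 * args "fd" and "count" on a 64-bit build (sizeof(unsigned long) == 8),
 * the generated print_fmt is:
 *
 *   "fd: 0x%08lx, count: 0x%08lx", ((unsigned long)(REC->fd)),
 *   ((unsigned long)(REC->count))
 */
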
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

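/*
 * The reserved record is variable-sized: the fixed header plus one
 * unsigned long per argument.  filter_current_check_discard() drops
 * the event (returning nonzero) when it fails the event filter, so
 * only a zero return commits the record to the ring buffer.
 */
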
void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

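/*
 * Registration below is refcounted: a single probe services every
 * syscall, so the tracepoint is attached only when the first event is
 * enabled and detached when the last one goes away.  Which syscalls
 * actually record is controlled per syscall by the enabled_* bitmaps
 * tested in the probes above.
 */
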
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

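/*
 * Boot-time setup: walk the syscall table, resolve each slot to a
 * symbol through kallsyms, and map the syscall number to its metadata
 * entry.  Slots that resolve to sys_ni_syscall, or that have no
 * metadata at all, simply stay NULL in syscalls_metadata.
 */
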
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	/* syscall_get_nr() can return -1; mirror the ftrace probes above */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

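/*
 * Size arithmetic above: the perf output buffer prefixes each trace
 * record with a u32 size field.  Rounding (payload + sizeof(u32)) up
 * to a u64 boundary and then subtracting the u32 back keeps the header
 * plus payload 8-byte aligned.
 */
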
int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	/* syscall_get_nr() can return -1; mirror the ftrace probes above */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

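/*
 * The two dispatchers below are the single ->reg entry point for each
 * event class: the trace core hands them a TRACE_REG_* request and
 * they route it to the matching ftrace or perf enable/disable helper
 * above.
 */
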
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
#endif
	}
	return 0;
}