trace_syscalls.c

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
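
/*
 * Resolve the symbol name of @syscall via kallsyms and scan the
 * __syscalls_metadata section for an entry whose name matches it.
 * Unimplemented syscalls (sys_ni_syscall) have no metadata.
 */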
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
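
/*
 * Output one sys_enter event as "name(arg: value, ...)".  With the
 * verbose iterator flag set, each value is prefixed with its type.
 */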
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
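
/*
 * Output one sys_exit event as "name -> 0x<return value>".
 */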
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
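
/*
 * SYSCALL_FIELD() expands to the (type, name, offset, size, is_signed)
 * argument list expected by trace_define_field().  __bad_type_size() is
 * intentionally never defined: if the declared type does not match the
 * size of the struct member, the call survives constant folding and the
 * build fails at link time.
 */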
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
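
/*
 * Build the print_fmt string for a syscall entry event: each argument
 * is printed as "name: 0x%0<width>lx" and referenced afterwards as
 * ", ((unsigned long)(REC->name))".  A first pass with @buf == NULL and
 * @len == 0 only computes the length that needs to be allocated.
 */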
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}
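
/*
 * Describe the record layout to the event filter code: the syscall
 * number plus one unsigned long per argument for sys_enter, and the
 * syscall number plus the return value for sys_exit.
 */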
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}
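
/*
 * Tracepoint probe for sys_enter: if tracing is enabled for this
 * syscall, reserve space in the ring buffer and record the syscall
 * number and its arguments.
 */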
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
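
/*
 * Tracepoint probe for sys_exit: record the syscall number and its
 * return value if tracing is enabled for this syscall.
 */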
void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
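
/*
 * The reg/unreg helpers below keep one global refcount per direction:
 * the shared sys_enter/sys_exit probe is registered when the first
 * syscall event is enabled and unregistered when the last one goes
 * away, while the per-syscall bitmaps select which syscalls the probe
 * actually records.
 */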
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);

	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);

	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
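
/*
 * Walk the syscall table at early boot and map each syscall number to
 * its metadata entry, so syscall_nr_to_meta() can resolve it later.
 */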
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
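
/*
 * Perf variant of the sys_enter probe: build the record in the perf
 * trace buffer and submit it to the per-CPU list of perf events
 * attached to this syscall's enter event.
 */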
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}
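
/*
 * Perf variant of the sys_exit probe: record the syscall number and
 * return value in the perf trace buffer and submit it to the per-CPU
 * list of perf events attached to the exit event.
 */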
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid about the future.
	 * How could this check be moved out of the runtime path?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
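
/*
 * Event class .reg callbacks: dispatch the TRACE_REG_* requests from
 * the generic event code to the ftrace and perf enable/disable helpers
 * above.
 */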
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}