trace_syscalls.c

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have their syscall symbols aliased
	 * with a ".SyS" or ".sys" prefix instead of "sys", leading
	 * to an unwanted mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

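/*
 * Example (editorial, illustrative): on an arch whose syscall
 * wrappers emit a "SyS_read" alias for "sys_read", comparing from
 * offset 3 skips the "SyS"/"sys" prefix on both sides, so only
 * "_read" is compared and the alias still matches the metadata
 * name "sys_read".
 */
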
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore them.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

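/*
 * Editorial note: __start/__stop_syscalls_metadata bound the
 * "__syscalls_metadata" linker section that SYSCALL_DEFINEx() /
 * SYSCALL_METADATA() populate, so the scan above is a linear walk
 * over every syscall compiled with metadata, keyed on the symbol
 * name that kallsyms_lookup() resolved from the handler address.
 */
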
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

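/*
 * Example output (editorial, illustrative values): a sys_read entry
 * with three args renders in the trace file as
 *
 *	sys_read(fd: 3, buf: 7fffa3d4b000, count: 400)
 *
 * with args printed in hex. When TRACE_ITER_VERBOSE is set, each
 * value is preceded by its type, e.g. "unsigned int fd: 3".
 */
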
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

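/*
 * Example output (editorial, illustrative): the exit line matching
 * the entry example above would read
 *
 *	sys_read -> 0x400
 */
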
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

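/*
 * Editorial note: __bad_type_size() is declared but deliberately
 * never defined. The sizeof comparison is a compile-time constant,
 * so on a match the call is optimized away; on a mismatch the call
 * survives and the build fails at link time with an undefined
 * reference, turning a type/field size mismatch into a build error
 * rather than a silently corrupt field definition.
 */
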
static
int  __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

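/*
 * Example (editorial, illustrative): for sys_read(fd, buf, count)
 * on a 64-bit arch (sizeof(unsigned long) == 8) this builds the
 * print_fmt string
 *
 *	"fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->buf)),
 *	((unsigned long)(REC->count))
 *
 * The two-pass pattern (len=0 to size, then fill) relies on
 * snprintf() returning the would-be length even when it writes
 * nothing.
 */
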
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

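/*
 * Usage sketch (editorial, assuming a standard tracefs/debugfs
 * mount): this handler only fires once a syscall event is enabled,
 * e.g.:
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo 1 > events/syscalls/sys_enter_read/enable
 *	# cat trace
 *
 * Reserve/fill/commit is the usual ring-buffer discipline: the
 * record is written in place inside the reserved slot and becomes
 * visible to readers only at the unlock_commit, unless the filter
 * check discards it first.
 */
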
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

static int reg_event_syscall_enter(struct ftrace_event_file *file,
				   struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		set_bit(num, tr->enabled_enter_syscalls);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_file *file,
				      struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	clear_bit(num, tr->enabled_enter_syscalls);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

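/*
 * Editorial note on the pattern above: one sys_enter tracepoint
 * probe serves every syscall, so registration is refcounted. The
 * first enabled event hooks the tracepoint; later ones only set
 * their bit in enabled_enter_syscalls, and the probe filters
 * per-syscall with test_bit(). The exit side below mirrors this
 * exactly.
 */
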
static int reg_event_syscall_exit(struct ftrace_event_file *file,
				  struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		set_bit(num, tr->enabled_exit_syscalls);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_file *file,
				     struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	clear_bit(num, tr->enabled_exit_syscalls);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

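/*
 * Editorial note: arch_syscall_addr() is __weak so that an
 * architecture whose sys_call_table entries are not plain C
 * function pointers (e.g. encoded or offset-based tables) can
 * provide its own non-weak definition with the same signature;
 * the linker then prefers it over this generic one.
 */
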
static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

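/*
 * Editorial note: the lookup table costs NR_syscalls pointers,
 * e.g. roughly 300 * 8 bytes = ~2.4 KB on a 64-bit arch with ~300
 * syscalls (figures illustrative). early_initcall() runs this
 * ahead of the normal initcall levels, so the table is ready by
 * the time syscall events are first registered.
 */
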
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

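/*
 * Worked example of the size math (editorial, figures illustrative):
 * with a 16-byte record header and 3 unsigned long args on 64-bit,
 * size = 16 + 24 = 40; adding the u32 size field gives 44, ALIGN to
 * a u64 boundary gives 48, and subtracting the u32 again leaves 44.
 * The record plus perf's own u32 length prefix then ends on an
 * 8-byte boundary in the perf ring buffer.
 */
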
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but stay paranoid about future growth;
	 * ideally this check would move out of the runtime path.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}