/* trace_syscalls.c */

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>
#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an
	 * unwanted mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

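/*
 * Illustrative example (not from this file): on an arch whose syscall
 * wrappers alias "sys_read" as "SyS_read", the default matcher above
 * compares "sym + 3" with "name + 3", i.e. "_read" against "_read",
 * so the aliased symbol still resolves to the right metadata entry.
 */
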
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32-bit applications
 * to run on a 64-bit kernel do not map the syscalls for
 * the 32-bit tasks the same as they do for 64-bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore them.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

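/*
 * A sys_enter event rendered by print_syscall_enter() looks roughly
 * like this in the trace output (argument names come from the syscall
 * metadata; values are printed in hex by the "%lx" above, and are
 * invented here purely for illustration):
 *
 *	sys_read(fd: 3, buf: 7fffb9015000, count: 1000)
 *
 * With the "verbose" trace option set, each value is additionally
 * preceded by its parameter type.
 */
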
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

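/*
 * The matching sys_exit line pairs the syscall name with its raw
 * return value, e.g. (value invented for illustration):
 *
 *	sys_read -> 0x1000
 */
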
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

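/*
 * For example, SYSCALL_FIELD(int, nr) expands (when the size check
 * passes) to the argument list:
 *
 *	"int", "nr", offsetof(typeof(trace), nr), sizeof(trace.nr),
 *	is_signed_type(int)
 *
 * which supplies the (type, name, offset, size, is_signed) parameters
 * of trace_define_field(); the caller adds the filter type. If
 * sizeof(type) and sizeof(trace.name) ever disagree, the reference to
 * the never-defined __bad_type_size() turns the mismatch into a
 * link-time error.
 */
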
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

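/*
 * Sketch of the generated print_fmt for a three-argument syscall,
 * assuming 8-byte longs and arguments named fd/buf/count (the names
 * are illustrative; they come from the syscall metadata):
 *
 *	"fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->buf)),
 *	((unsigned long)(REC->count))
 *
 * The first pass (len == 0) only measures; the second pass writes into
 * the buffer allocated by set_syscall_print_fmt() below.
 */
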
static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct ftrace_event_file *ftrace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct ftrace_event_file *ftrace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}

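/*
 * Once these handlers are registered, individual syscall events can be
 * driven from the tracing filesystem in the usual way, e.g. (paths
 * assume tracefs is mounted at /sys/kernel/debug/tracing):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_read/enable
 *	# echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_exit_read/enable
 *	# cat /sys/kernel/debug/tracing/trace
 */
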
static int reg_event_syscall_enter(struct ftrace_event_file *file,
				   struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_file *file,
				      struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
	/*
	 * Callers expect the event to be completely disabled on
	 * return, so wait for current handlers to finish.
	 */
	synchronize_sched();
}

static int reg_event_syscall_exit(struct ftrace_event_file *file,
				  struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_file *file,
				     struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
	/*
	 * Callers expect the event to be completely disabled on
	 * return, so wait for current handlers to finish.
	 */
	synchronize_sched();
}

static int __init init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

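/*
 * Worked example of the size calculation above (numbers invented for
 * illustration): with a 16-byte record header and three 8-byte args,
 * size = 16 + 24 = 40; ALIGN(40 + 4, 8) - 4 = 44, so the record plus
 * perf's u32 size field together fill an exact multiple of u64.
 */
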
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}