trace_syscalls.c
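/*
 * Syscall tracepoint support: exposes one trace event per system call
 * entry and exit under the "syscalls" subsystem, with text output for the
 * ftrace ring buffer and binary records for perf.
 */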

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				  enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

static struct list_head *
syscall_get_exit_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->exit_fields;
}

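/*
 * Every syscall enter event shares one ftrace event class and every exit
 * event shares another; the per-event differences live in the
 * syscall_metadata hanging off each call's ->data.
 */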
struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.get_fields	= syscall_get_exit_fields,
	.raw_init	= init_syscall_trace,
};

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

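/*
 * Walk the __syscalls_metadata linker section looking for the entry whose
 * recorded name matches the kernel symbol at @syscall's address.
 */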
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscall symbol aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

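/*
 * Text output for an enter event: "name(arg: value, ...)", with each value
 * preceded by its type when the verbose trace option is set.
 */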
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

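/*
 * SYSCALL_FIELD() expands into the argument list trace_define_field()
 * expects for one record member.  __bad_type_size() is declared but never
 * defined, so a size mismatch between @type and the struct member leaves an
 * unresolved reference and fails at link time.
 */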
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

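/*
 * Build the print_fmt string for an enter event in two passes: called with
 * len == 0 it only measures the required length, called again with a real
 * buffer it writes the format string and the REC-> argument list.
 */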
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

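/*
 * Describe the record layout to the event filtering code: the syscall
 * number, then one unsigned long per argument for enter events, or the
 * return value for exit events.
 */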
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

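/*
 * Tracepoint probes for the ftrace ring buffer: called on every syscall
 * entry/exit, they bail out early unless the syscall's bit is set in the
 * corresponding enabled bitmap, then reserve, fill and commit one event.
 */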
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->id, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->id, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

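/*
 * Per-event enable/disable for the ftrace backend.  The first event enabled
 * registers the shared tracepoint probe, the last one disabled unregisters
 * it; in between, the bitmaps decide which syscalls are actually recorded.
 */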
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

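/*
 * Boot-time setup: allocate the syscall-number-indexed metadata table and
 * populate it by resolving every sys_call_table entry back to its
 * compile-time metadata.
 */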
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

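/*
 * Perf backend: the same sys_enter/sys_exit tracepoints feed fixed-size
 * records into the perf trace buffer instead of the ftrace ring buffer.
 */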
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	/* -1 means "not in a syscall"; don't index the bitmap with it */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->id, &rctx, &flags);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	/* -1 means "not in a syscall"; don't index the bitmap with it */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->id, &rctx, &flags);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

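/*
 * ->reg() callbacks of the two event classes: dispatch on the requested
 * operation and hand off to the ftrace or perf enable/disable helpers.
 */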
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
#endif
	}
	return 0;
}