trace_syscalls.c

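/*
 * Ftrace event support for the raw syscall tracepoints (sys_enter/sys_exit):
 * maps syscall numbers to their compile-time metadata, formats the events for
 * the trace output, records them in the ftrace ring buffer, and feeds them to
 * perf when CONFIG_EVENT_PROFILE is enabled (see the section at the bottom).
 */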
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;
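/*
 * Resolve a syscall handler address to its syscall_metadata entry: look up
 * the symbol name via kallsyms and scan the metadata placed between
 * __start_syscalls_metadata and __stop_syscalls_metadata for a name match.
 */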
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        for ( ; start < stop; start++) {
                /*
                 * Only compare after the "sys" prefix. Archs that use
                 * syscall wrappers may have syscall symbol aliases prefixed
                 * with "SyS" instead of "sys", leading to an unwanted
                 * mismatch.
                 */
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}
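/*
 * Output callbacks used by the trace iterator to render recorded syscall
 * enter/exit events as "name(arg: value, ...)" and "name -> retval" lines.
 */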
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->id != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_event->id != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
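/*
 * SYSCALL_FIELD expands to the (type name, offset, size, signedness) values
 * expected by the format printers and trace_define_field() below. The
 * __bad_type_size() reference is declared but never defined, so a mismatch
 * between the declared type and the struct field turns into a build error.
 */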
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)                                      \
        sizeof(type) != sizeof(trace.name) ?                           \
                __bad_type_size() :                                    \
                #type, #name, offsetof(typeof(trace), name),           \
                sizeof(trace.name), is_signed_type(type)
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int i;
        int ret;
        struct syscall_metadata *entry = call->data;
        struct syscall_trace_enter trace;
        int offset = offsetof(struct syscall_trace_enter, args);

        ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr));
        if (!ret)
                return 0;

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
                                       entry->args[i]);
                if (!ret)
                        return 0;
                ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
                                       "\tsigned:%u;\n", offset,
                                       sizeof(unsigned long),
                                       is_signed_type(unsigned long));
                if (!ret)
                        return 0;
                offset += sizeof(unsigned long);
        }

        trace_seq_puts(s, "\nprint fmt: \"");
        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
                                       sizeof(unsigned long),
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return 0;
        }
        trace_seq_putc(s, '"');

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
                                       entry->args[i]);
                if (!ret)
                        return 0;
        }

        return trace_seq_putc(s, '\n');
}
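/*
 * Build the "print fmt" string for an enter event. The helper below is run
 * twice: once with len == 0 to compute the required length, then again to
 * actually fill the allocated buffer.
 */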
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
        int i;
        int pos = 0;

        /* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
                                entry->args[i], sizeof(unsigned long),
                                i == entry->nb_args - 1 ? "" : ", ");
        }
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO,
                                ", ((unsigned long)(REC->%s))", entry->args[i]);
        }

#undef LEN_OR_ZERO

        /* return the length of print_fmt */
        return pos;
}
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
        char *print_fmt;
        int len;
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event != call) {
                call->print_fmt = "\"0x%lx\", REC->ret";
                return 0;
        }

        /* First: called with 0 length to calculate the needed length */
        len = __set_enter_print_fmt(entry, NULL, 0);

        print_fmt = kmalloc(len + 1, GFP_KERNEL);
        if (!print_fmt)
                return -ENOMEM;

        /* Second: actually write the @print_fmt */
        __set_enter_print_fmt(entry, print_fmt, len + 1);
        call->print_fmt = print_fmt;

        return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event == call)
                kfree(call->print_fmt);
}
int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int ret;
        struct syscall_trace_exit trace;

        ret = trace_seq_printf(s,
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n"
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr),
                               SYSCALL_FIELD(long, ret));
        if (!ret)
                return 0;

        return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int ret;
        int i;
        int offset = offsetof(typeof(trace), args);

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}
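/*
 * Tracepoint probes: record one event in the ftrace ring buffer per traced
 * syscall entry/exit, but only for syscalls whose bit is set in the
 * corresponding enabled bitmap.
 */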
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->enter_event->id, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->exit_event->id, sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
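/*
 * Enable/disable tracing of a single syscall event. The first enabled event
 * registers the shared sys_enter/sys_exit probe and the last one unregisters
 * it; per-syscall filtering is done through the bitmaps. All of this is
 * serialized by syscall_trace_lock.
 */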
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter);
        if (!ret) {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit);
        if (!ret) {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}
int init_syscall_trace(struct ftrace_event_call *call)
{
        int id;

        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;

        id = register_ftrace_event(call->event);
        if (!id) {
                free_syscall_print_fmt(call);
                return -ENODEV;
        }
        call->id = id;
        INIT_LIST_HEAD(&call->fields);
        return 0;
}
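/*
 * Boot-time initialization: allocate the NR_syscalls-sized lookup table and
 * fill it by resolving each entry of the arch syscall table (via
 * arch_syscall_addr()) to its metadata.
 */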
int __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                    NR_syscalls, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;
                syscalls_metadata[i] = meta;
        }

        return 0;
}
core_initcall(init_ftrace_syscalls);
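/*
 * Perf (event profiling) support: the probes below copy the syscall event
 * into the per-cpu perf trace buffer and submit it with perf_tp_event(),
 * mirroring the ftrace probes above but without touching the ring buffer.
 */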
#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        unsigned long flags;
        char *trace_buf;
        char *raw_data;
        int syscall_nr;
        int rctx;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "profile buffer not large enough"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                goto end_recursion;

        cpu = smp_processor_id();

        trace_buf = rcu_dereference(perf_trace_buf);
        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_enter *)raw_data;
        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->enter_event->id;
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);
        perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);

end:
        perf_swevent_put_recursion_context(rctx);
end_recursion:
        local_irq_restore(flags);
}
int prof_sysenter_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_enter)
                ret = register_trace_sys_enter(prof_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point");
        } else {
                set_bit(num, enabled_prof_enter_syscalls);
                sys_prof_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void prof_sysenter_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_enter--;
        clear_bit(num, enabled_prof_enter_syscalls);
        if (!sys_prof_refcount_enter)
                unregister_trace_sys_enter(prof_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        unsigned long flags;
        int syscall_nr;
        char *trace_buf;
        char *raw_data;
        int rctx;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "exit event has grown above profile buffer size"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                goto end_recursion;

        cpu = smp_processor_id();

        trace_buf = rcu_dereference(perf_trace_buf);
        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_exit *)raw_data;
        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->exit_event->id;
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);
        perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);

end:
        perf_swevent_put_recursion_context(rctx);
end_recursion:
        local_irq_restore(flags);
}
int prof_sysexit_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_exit)
                ret = register_trace_sys_exit(prof_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point");
        } else {
                set_bit(num, enabled_prof_exit_syscalls);
                sys_prof_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void prof_sysexit_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_exit--;
        clear_bit(num, enabled_prof_exit_syscalls);
        if (!sys_prof_refcount_exit)
                unregister_trace_sys_exit(prof_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}

#endif