/* trace_syscalls.c */

#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>
#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

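/*
 * Resolve the metadata entry for a syscall handler: look up the handler's
 * symbol name via kallsyms and match it against the entries placed in the
 * __syscalls_metadata section at build time.
 */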
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscall symbol aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i]) {
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
		}
	}
	return -1;
}

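/*
 * Record the ftrace event ids assigned to a syscall's enter/exit events so
 * the probes below can stamp them into each trace entry.
 */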
void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

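/*
 * Output callback for sys_enter events: prints the syscall name and its
 * arguments (with argument types when the verbose trace flag is set).
 */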
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

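/*
 * Output callback for sys_exit events: prints the syscall name and its
 * return value.
 */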
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

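/*
 * SYSCALL_FIELD expands to the type/name/offset/size/signedness arguments
 * used when describing a field of the trace record. __bad_type_size() is
 * declared but never defined, so if the declared type's size disagrees with
 * the struct member's, the leftover call fails at build time.
 */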
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)				\
	sizeof(type) != sizeof(trace.name) ?			\
		__bad_type_size() :				\
		#type, #name, offsetof(typeof(trace), name),	\
		sizeof(trace.name), is_signed_type(type)

int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int nr;
	int ret;
	struct syscall_metadata *entry;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	nr = syscall_name_to_nr(call->data);
	entry = syscall_nr_to_meta(nr);

	if (!entry)
		return 0;

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta;
	int ret;
	int nr;
	int i;
	int offset = offsetof(typeof(trace), args);

	nr = syscall_name_to_nr(call->data);
	meta = syscall_nr_to_meta(nr);

	if (!meta)
		return 0;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

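/*
 * Tracepoint probe for sys_enter: records the syscall number and its
 * arguments into the ftrace ring buffer when tracing is enabled for that
 * syscall.
 */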
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
						  size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

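/*
 * Tracepoint probe for sys_exit: records the syscall number and return
 * value into the ftrace ring buffer when tracing is enabled for that
 * syscall.
 */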
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

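/*
 * The sys_enter/sys_exit tracepoints are shared by every syscall event, so
 * the probes above are registered when the first event is enabled and
 * unregistered when the last one is disabled; per-syscall enablement is
 * tracked in the enabled_*_syscalls bitmaps.
 */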
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;
	char *name;

	name = (char *)call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	char *name;

	name = call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;
	char *name;

	name = call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
	.trace		= print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace		= print_syscall_exit,
};

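/*
 * Build the NR_syscalls-sized lookup table mapping a syscall number to its
 * metadata, by resolving each syscall table entry at boot time.
 */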
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
				    NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

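/*
 * Perf profiling probe for sys_enter: builds the event record in a per-cpu
 * scratch buffer (guarded against recursion) and hands it to perf via
 * perf_tp_event().
 */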
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct perf_trace_buf *trace_buf;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *raw_data;
	int syscall_nr;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	/* syscall_get_nr() may return -1; bail out before indexing the bitmap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	trace_buf = per_cpu_ptr(trace_buf, cpu);

	if (trace_buf->recursion++)
		goto end_recursion;

	/*
	 * Make recursion update visible before entering perf_tp_event
	 * so that we protect from perf recursions.
	 */
	barrier();

	raw_data = trace_buf->buf;

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end_recursion:
	trace_buf->recursion--;
end:
	local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

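/*
 * Perf profiling probe for sys_exit: same scheme as prof_syscall_enter(),
 * but the record is fixed-size and carries the return value.
 */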
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct perf_trace_buf *trace_buf;
	unsigned long flags;
	int syscall_nr;
	char *raw_data;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	/* syscall_get_nr() may return -1; bail out before indexing the bitmap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	trace_buf = per_cpu_ptr(trace_buf, cpu);

	if (trace_buf->recursion++)
		goto end_recursion;

	/*
	 * Make recursion update visible before entering perf_tp_event
	 * so that we protect from perf recursions.
	 */
	barrier();

	raw_data = trace_buf->buf;

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end_recursion:
	trace_buf->recursion--;
end:
	local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif	/* CONFIG_EVENT_PROFILE */