builtin-trace.c

#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"

#include <libaudit.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <linux/futex.h>
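
/*
 * Syscall argument "beautifiers": each one formats a single raw syscall
 * argument into bf/size and may set bits in arg_mask to tell the caller
 * that other arguments are meaningless for this call and can be skipped.
 */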
static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
                                         unsigned long arg, u8 *arg_mask __maybe_unused)
{
        return scnprintf(bf, size, "%#lx", arg);
}

#define SCA_HEX syscall_arg__scnprintf_hex

static size_t syscall_arg__scnprintf_whence(char *bf, size_t size,
                                            unsigned long arg, u8 *arg_mask __maybe_unused)
{
        int whence = arg;

        switch (whence) {
#define P_WHENCE(n) case SEEK_##n: return scnprintf(bf, size, #n)
        P_WHENCE(SET);
        P_WHENCE(CUR);
        P_WHENCE(END);
#ifdef SEEK_DATA
        P_WHENCE(DATA);
#endif
#ifdef SEEK_HOLE
        P_WHENCE(HOLE);
#endif
#undef P_WHENCE
        default: break;
        }

        return scnprintf(bf, size, "%#x", whence);
}

#define SCA_WHENCE syscall_arg__scnprintf_whence

static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
                                               unsigned long arg, u8 *arg_mask __maybe_unused)
{
        int printed = 0, prot = arg;

        if (prot == PROT_NONE)
                return scnprintf(bf, size, "NONE");
#define P_MMAP_PROT(n) \
        if (prot & PROT_##n) { \
                printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
                prot &= ~PROT_##n; \
        }

        P_MMAP_PROT(EXEC);
        P_MMAP_PROT(READ);
        P_MMAP_PROT(WRITE);
#ifdef PROT_SEM
        P_MMAP_PROT(SEM);
#endif
        P_MMAP_PROT(GROWSDOWN);
        P_MMAP_PROT(GROWSUP);
#undef P_MMAP_PROT

        if (prot)
                printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);

        return printed;
}

#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot

static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
                                                unsigned long arg, u8 *arg_mask __maybe_unused)
{
        int printed = 0, flags = arg;

#define P_MMAP_FLAG(n) \
        if (flags & MAP_##n) { \
                printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
                flags &= ~MAP_##n; \
        }

        P_MMAP_FLAG(SHARED);
        P_MMAP_FLAG(PRIVATE);
        P_MMAP_FLAG(32BIT);
        P_MMAP_FLAG(ANONYMOUS);
        P_MMAP_FLAG(DENYWRITE);
        P_MMAP_FLAG(EXECUTABLE);
        P_MMAP_FLAG(FILE);
        P_MMAP_FLAG(FIXED);
        P_MMAP_FLAG(GROWSDOWN);
#ifdef MAP_HUGETLB
        P_MMAP_FLAG(HUGETLB);
#endif
        P_MMAP_FLAG(LOCKED);
        P_MMAP_FLAG(NONBLOCK);
        P_MMAP_FLAG(NORESERVE);
        P_MMAP_FLAG(POPULATE);
        P_MMAP_FLAG(STACK);
#ifdef MAP_UNINITIALIZED
        P_MMAP_FLAG(UNINITIALIZED);
#endif
#undef P_MMAP_FLAG

        if (flags)
                printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

        return printed;
}

#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags

static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
                                                      unsigned long arg, u8 *arg_mask __maybe_unused)
{
        int behavior = arg;

        switch (behavior) {
#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
        P_MADV_BHV(NORMAL);
        P_MADV_BHV(RANDOM);
        P_MADV_BHV(SEQUENTIAL);
        P_MADV_BHV(WILLNEED);
        P_MADV_BHV(DONTNEED);
        P_MADV_BHV(REMOVE);
        P_MADV_BHV(DONTFORK);
        P_MADV_BHV(DOFORK);
        P_MADV_BHV(HWPOISON);
#ifdef MADV_SOFT_OFFLINE
        P_MADV_BHV(SOFT_OFFLINE);
#endif
        P_MADV_BHV(MERGEABLE);
        P_MADV_BHV(UNMERGEABLE);
#ifdef MADV_HUGEPAGE
        P_MADV_BHV(HUGEPAGE);
#endif
#ifdef MADV_NOHUGEPAGE
        P_MADV_BHV(NOHUGEPAGE);
#endif
#ifdef MADV_DONTDUMP
        P_MADV_BHV(DONTDUMP);
#endif
#ifdef MADV_DODUMP
        P_MADV_BHV(DODUMP);
#endif
#undef P_MADV_BHV
        default: break;
        }

        return scnprintf(bf, size, "%#x", behavior);
}

#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
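
/*
 * futex's "op" argument determines which of the remaining arguments are
 * actually used, so besides decoding the operation this beautifier sets
 * the corresponding SCF_* bits in arg_mask to suppress the unused ones.
 */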
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned long arg, u8 *arg_mask)
{
        enum syscall_futex_args {
                SCF_UADDR   = (1 << 0),
                SCF_OP      = (1 << 1),
                SCF_VAL     = (1 << 2),
                SCF_TIMEOUT = (1 << 3),
                SCF_UADDR2  = (1 << 4),
                SCF_VAL3    = (1 << 5),
        };
        int op = arg;
        int cmd = op & FUTEX_CMD_MASK;
        size_t printed = 0;

        switch (cmd) {
#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
        P_FUTEX_OP(WAIT);            *arg_mask |= SCF_VAL3|SCF_UADDR2;             break;
        P_FUTEX_OP(WAKE);            *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
        P_FUTEX_OP(FD);              *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
        P_FUTEX_OP(REQUEUE);         *arg_mask |= SCF_VAL3|SCF_TIMEOUT;            break;
        P_FUTEX_OP(CMP_REQUEUE);     *arg_mask |= SCF_TIMEOUT;                     break;
        P_FUTEX_OP(CMP_REQUEUE_PI);  *arg_mask |= SCF_TIMEOUT;                     break;
        P_FUTEX_OP(WAKE_OP);                                                       break;
        P_FUTEX_OP(LOCK_PI);         *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
        P_FUTEX_OP(UNLOCK_PI);       *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
        P_FUTEX_OP(TRYLOCK_PI);      *arg_mask |= SCF_VAL3|SCF_UADDR2;             break;
        P_FUTEX_OP(WAIT_BITSET);     *arg_mask |= SCF_UADDR2;                      break;
        P_FUTEX_OP(WAKE_BITSET);     *arg_mask |= SCF_UADDR2;                      break;
        P_FUTEX_OP(WAIT_REQUEUE_PI);                                               break;
        default: printed = scnprintf(bf, size, "%#x", cmd);                        break;
        }

        if (op & FUTEX_PRIVATE_FLAG)
                printed += scnprintf(bf + printed, size - printed, "|PRIV");

        if (op & FUTEX_CLOCK_REALTIME)
                printed += scnprintf(bf + printed, size - printed, "|CLKRT");

        return printed;
}

#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
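
/*
 * Per-syscall formatting overrides, looked up by name with bsearch(), so
 * the array below must be kept sorted by ->name.
 */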
static struct syscall_fmt {
        const char *name;
        const char *alias;
        size_t     (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 *arg_mask);
        bool       errmsg;
        bool       timeout;
        bool       hexret;
} syscall_fmts[] = {
        { .name     = "access",     .errmsg = true, },
        { .name     = "arch_prctl", .errmsg = true, .alias = "prctl", },
        { .name     = "brk",        .hexret = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
        { .name     = "connect",    .errmsg = true, },
        { .name     = "fstat",      .errmsg = true, .alias = "newfstat", },
        { .name     = "fstatat",    .errmsg = true, .alias = "newfstatat", },
        { .name     = "futex",      .errmsg = true,
          .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
        { .name     = "ioctl",      .errmsg = true,
          .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
        { .name     = "lseek",      .errmsg = true,
          .arg_scnprintf = { [2] = SCA_WHENCE, /* whence */ }, },
        { .name     = "lstat",      .errmsg = true, .alias = "newlstat", },
        { .name     = "madvise",    .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX,      /* start */
                             [2] = SCA_MADV_BHV, /* behavior */ }, },
        { .name     = "mmap",       .hexret = true,
          .arg_scnprintf = { [0] = SCA_HEX,        /* addr */
                             [2] = SCA_MMAP_PROT,  /* prot */
                             [3] = SCA_MMAP_FLAGS, /* flags */ }, },
        { .name     = "mprotect",   .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX,       /* start */
                             [2] = SCA_MMAP_PROT, /* prot */ }, },
        { .name     = "mremap",     .hexret = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */
                             [4] = SCA_HEX, /* new_addr */ }, },
        { .name     = "munmap",     .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name     = "open",       .errmsg = true, },
        { .name     = "poll",       .errmsg = true, .timeout = true, },
        { .name     = "ppoll",      .errmsg = true, .timeout = true, },
        { .name     = "pread",      .errmsg = true, .alias = "pread64", },
        { .name     = "pwrite",     .errmsg = true, .alias = "pwrite64", },
        { .name     = "read",       .errmsg = true, },
        { .name     = "recvfrom",   .errmsg = true, },
        { .name     = "select",     .errmsg = true, .timeout = true, },
        { .name     = "socket",     .errmsg = true, },
        { .name     = "stat",       .errmsg = true, .alias = "newstat", },
        { .name     = "uname",      .errmsg = true, .alias = "newuname", },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
        const struct syscall_fmt *fmt = fmtp;
        return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
        const int nmemb = ARRAY_SIZE(syscall_fmts);
        return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
        struct event_format *tp_format;
        const char          *name;
        bool                filtered;
        struct syscall_fmt  *fmt;
        size_t              (**arg_scnprintf)(char *bf, size_t size,
                                              unsigned long arg, u8 *args_mask);
};

static size_t fprintf_duration(unsigned long t, FILE *fp)
{
        double duration = (double)t / NSEC_PER_MSEC;
        size_t printed = fprintf(fp, "(");

        if (duration >= 1.0)
                printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
        else if (duration >= 0.01)
                printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
        else
                printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
        return printed + fprintf(fp, "): ");
}

struct thread_trace {
        u64           entry_time;
        u64           exit_time;
        bool          entry_pending;
        unsigned long nr_events;
        char          *entry_str;
        double        runtime_ms;
};

static struct thread_trace *thread_trace__new(void)
{
        return zalloc(sizeof(struct thread_trace));
}
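
/*
 * Per-thread state (entry timestamp, pending entry string, runtime) is
 * allocated lazily and hung off thread->priv on first use.
 */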
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
        struct thread_trace *ttrace;

        if (thread == NULL)
                goto fail;

        if (thread->priv == NULL)
                thread->priv = thread_trace__new();

        if (thread->priv == NULL)
                goto fail;

        ttrace = thread->priv;
        ++ttrace->nr_events;

        return ttrace;
fail:
        color_fprintf(fp, PERF_COLOR_RED,
                      "WARNING: not enough memory, dropping samples!\n");
        return NULL;
}

struct trace {
        struct perf_tool        tool;
        int                     audit_machine;
        struct {
                int             max;
                struct syscall  *table;
        } syscalls;
        struct perf_record_opts opts;
        struct machine          host;
        u64                     base_time;
        FILE                    *output;
        unsigned long           nr_events;
        struct strlist          *ev_qualifier;
        bool                    not_ev_qualifier;
        struct intlist          *tid_list;
        struct intlist          *pid_list;
        bool                    sched;
        bool                    multiple_threads;
        double                  duration_filter;
        double                  runtime_ms;
};

static bool trace__filter_duration(struct trace *trace, double t)
{
        return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
        double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
        return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
        done = true;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
                                        u64 duration, u64 tstamp, FILE *fp)
{
        size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
        printed += fprintf_duration(duration, fp);

        if (trace->multiple_threads)
                printed += fprintf(fp, "%d ", thread->tid);

        return printed;
}

static int trace__process_event(struct trace *trace, struct machine *machine,
                                union perf_event *event)
{
        int ret = 0;

        switch (event->header.type) {
        case PERF_RECORD_LOST:
                color_fprintf(trace->output, PERF_COLOR_RED,
                              "LOST %" PRIu64 " events!\n", event->lost.lost);
                ret = machine__process_lost_event(machine, event);
        default:
                ret = machine__process_event(machine, event);
                break;
        }

        return ret;
}

static int trace__tool_process(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
{
        struct trace *trace = container_of(tool, struct trace, tool);
        return trace__process_event(trace, machine, event);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
        int err = symbol__init();

        if (err)
                return err;

        machine__init(&trace->host, "", HOST_KERNEL_ID);
        machine__create_kernel_maps(&trace->host);

        if (perf_target__has_task(&trace->opts.target)) {
                err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
                                                        trace__tool_process,
                                                        &trace->host);
        } else {
                err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
                                                     &trace->host);
        }

        if (err)
                symbol__exit();

        return err;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
        struct format_field *field;
        int idx = 0;

        sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
        if (sc->arg_scnprintf == NULL)
                return -1;

        for (field = sc->tp_format->format.fields->next; field; field = field->next) {
                if (sc->fmt && sc->fmt->arg_scnprintf[idx])
                        sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
                else if (field->flags & FIELD_IS_POINTER)
                        sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
                ++idx;
        }

        return 0;
}
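
/*
 * Fill in the entry for syscall 'id': map the id to its name via libaudit,
 * grow trace->syscalls.table as needed, apply the -e/--expr qualifier, and
 * hook up the matching "syscalls:sys_enter_*" tracepoint format.
 */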
static int trace__read_syscall_info(struct trace *trace, int id)
{
        char tp_name[128];
        struct syscall *sc;
        const char *name = audit_syscall_to_name(id, trace->audit_machine);

        if (name == NULL)
                return -1;

        if (id > trace->syscalls.max) {
                struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

                if (nsyscalls == NULL)
                        return -1;

                if (trace->syscalls.max != -1) {
                        memset(nsyscalls + trace->syscalls.max + 1, 0,
                               (id - trace->syscalls.max) * sizeof(*sc));
                } else {
                        memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
                }

                trace->syscalls.table = nsyscalls;
                trace->syscalls.max   = id;
        }

        sc = trace->syscalls.table + id;
        sc->name = name;

        if (trace->ev_qualifier) {
                bool in = strlist__find(trace->ev_qualifier, name) != NULL;

                if (!(in ^ trace->not_ev_qualifier)) {
                        sc->filtered = true;
                        /*
                         * No need to do read tracepoint information since this will be
                         * filtered out.
                         */
                        return 0;
                }
        }

        sc->fmt = syscall_fmt__find(sc->name);

        snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
        sc->tp_format = event_format__new("syscalls", tp_name);

        if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
                snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
                sc->tp_format = event_format__new("syscalls", tp_name);
        }

        if (sc->tp_format == NULL)
                return -1;

        return syscall__set_arg_fmts(sc);
}
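
/*
 * Format all the arguments of one syscall into bf. Beautifiers can set bits
 * in 'mask' (bit i == argument i) so that arguments known to be unused are
 * skipped; without tracepoint format info we fall back to "argN: %ld".
 */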
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
                                      unsigned long *args)
{
        int i = 0;
        size_t printed = 0;

        if (sc->tp_format != NULL) {
                struct format_field *field;
                u8 mask = 0, bit = 1;

                for (field = sc->tp_format->format.fields->next; field;
                     field = field->next, ++i, bit <<= 1) {
                        if (mask & bit)
                                continue;

                        printed += scnprintf(bf + printed, size - printed,
                                             "%s%s: ", printed ? ", " : "", field->name);

                        if (sc->arg_scnprintf && sc->arg_scnprintf[i]) {
                                printed += sc->arg_scnprintf[i](bf + printed, size - printed,
                                                                args[i], &mask);
                        } else {
                                printed += scnprintf(bf + printed, size - printed,
                                                     "%ld", args[i]);
                        }
                }
        } else {
                while (i < 6) {
                        printed += scnprintf(bf + printed, size - printed,
                                             "%sarg%d: %ld",
                                             printed ? ", " : "", i, args[i]);
                        ++i;
                }
        }

        return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
                                  struct perf_sample *sample);
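
/*
 * Look up (and, on first sight, lazily read) the syscall descriptor for the
 * id carried in a raw_syscalls:sys_{enter,exit} sample.
 */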
static struct syscall *trace__syscall_info(struct trace *trace,
                                           struct perf_evsel *evsel,
                                           struct perf_sample *sample)
{
        int id = perf_evsel__intval(evsel, sample, "id");

        if (id < 0) {
                /*
                 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
                 * before that, leaving at a higher verbosity level till that is
                 * explained. Reproduced with plain ftrace with:
                 *
                 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
                 * grep "NR -1 " /t/trace_pipe
                 *
                 * After generating some load on the machine.
                 */
                if (verbose > 1) {
                        static u64 n;
                        fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
                                id, perf_evsel__name(evsel), ++n);
                }
                return NULL;
        }

        if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
            trace__read_syscall_info(trace, id))
                goto out_cant_read;

        if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
                goto out_cant_read;

        return &trace->syscalls.table[id];

out_cant_read:
        if (verbose) {
                fprintf(trace->output, "Problems reading syscall %d", id);
                if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
                        fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
                fputs(" information\n", trace->output);
        }

        return NULL;
}
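
/*
 * raw_syscalls:sys_enter handler: format the entry line into the per-thread
 * entry_str. It is normally printed later from trace__sys_exit(), together
 * with the return value and duration; exit/exit_group, which never return,
 * are printed right away.
 */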
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
                            struct perf_sample *sample)
{
        char *msg;
        void *args;
        size_t printed = 0;
        struct thread *thread;
        struct syscall *sc = trace__syscall_info(trace, evsel, sample);
        struct thread_trace *ttrace;

        if (sc == NULL)
                return -1;

        if (sc->filtered)
                return 0;

        thread = machine__findnew_thread(&trace->host, sample->pid,
                                         sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
                return -1;

        args = perf_evsel__rawptr(evsel, sample, "args");
        if (args == NULL) {
                fprintf(trace->output, "Problems reading syscall arguments\n");
                return -1;
        }

        ttrace = thread->priv;

        if (ttrace->entry_str == NULL) {
                ttrace->entry_str = malloc(1024);
                if (!ttrace->entry_str)
                        return -1;
        }

        ttrace->entry_time = sample->time;
        msg = ttrace->entry_str;
        printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

        printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

        if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
                if (!trace->duration_filter) {
                        trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
                        fprintf(trace->output, "%-70s\n", ttrace->entry_str);
                }
        } else
                ttrace->entry_pending = true;

        return 0;
}

static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
                           struct perf_sample *sample)
{
        int ret;
        u64 duration = 0;
        struct thread *thread;
        struct syscall *sc = trace__syscall_info(trace, evsel, sample);
        struct thread_trace *ttrace;

        if (sc == NULL)
                return -1;

        if (sc->filtered)
                return 0;

        thread = machine__findnew_thread(&trace->host, sample->pid,
                                         sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
                return -1;

        ret = perf_evsel__intval(evsel, sample, "ret");

        ttrace = thread->priv;

        ttrace->exit_time = sample->time;

        if (ttrace->entry_time) {
                duration = sample->time - ttrace->entry_time;
                if (trace__filter_duration(trace, duration))
                        goto out;
        } else if (trace->duration_filter)
                goto out;

        trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

        if (ttrace->entry_pending) {
                fprintf(trace->output, "%-70s", ttrace->entry_str);
        } else {
                fprintf(trace->output, " ... [");
                color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
                fprintf(trace->output, "]: %s()", sc->name);
        }

        if (sc->fmt == NULL) {
signed_print:
                fprintf(trace->output, ") = %d", ret);
        } else if (ret < 0 && sc->fmt->errmsg) {
                char bf[256];
                const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
                           *e = audit_errno_to_name(-ret);

                fprintf(trace->output, ") = -1 %s %s", e, emsg);
        } else if (ret == 0 && sc->fmt->timeout)
                fprintf(trace->output, ") = 0 Timeout");
        else if (sc->fmt->hexret)
                fprintf(trace->output, ") = %#x", ret);
        else
                goto signed_print;

        fputc('\n', trace->output);
out:
        ttrace->entry_pending = false;
        return 0;
}
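
/*
 * sched:sched_stat_runtime handler (enabled with --sched): accumulate the
 * reported runtime per thread and globally, for the final summary.
 */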
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
                                     struct perf_sample *sample)
{
        u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
        double runtime_ms = (double)runtime / NSEC_PER_MSEC;
        struct thread *thread = machine__findnew_thread(&trace->host,
                                                        sample->pid,
                                                        sample->tid);
        struct thread_trace *ttrace = thread__trace(thread, trace->output);

        if (ttrace == NULL)
                goto out_dump;

        ttrace->runtime_ms += runtime_ms;
        trace->runtime_ms += runtime_ms;
        return 0;

out_dump:
        fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
                evsel->name,
                perf_evsel__strval(evsel, sample, "comm"),
                (pid_t)perf_evsel__intval(evsel, sample, "pid"),
                runtime,
                perf_evsel__intval(evsel, sample, "vruntime"));
        return 0;
}

static bool skip_sample(struct trace *trace, struct perf_sample *sample)
{
        if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
            (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
                return false;

        if (trace->pid_list || trace->tid_list)
                return true;

        return false;
}

static int trace__process_sample(struct perf_tool *tool,
                                 union perf_event *event __maybe_unused,
                                 struct perf_sample *sample,
                                 struct perf_evsel *evsel,
                                 struct machine *machine __maybe_unused)
{
        struct trace *trace = container_of(tool, struct trace, tool);
        int err = 0;

        tracepoint_handler handler = evsel->handler.func;

        if (skip_sample(trace, sample))
                return 0;

        if (trace->base_time == 0)
                trace->base_time = sample->time;

        if (handler)
                handler(trace, evsel, sample);

        return err;
}

static bool
perf_session__has_tp(struct perf_session *session, const char *name)
{
        struct perf_evsel *evsel;

        evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);

        return evsel != NULL;
}

static int parse_target_str(struct trace *trace)
{
        if (trace->opts.target.pid) {
                trace->pid_list = intlist__new(trace->opts.target.pid);
                if (trace->pid_list == NULL) {
                        pr_err("Error parsing process id string\n");
                        return -EINVAL;
                }
        }

        if (trace->opts.target.tid) {
                trace->tid_list = intlist__new(trace->opts.target.tid);
                if (trace->tid_list == NULL) {
                        pr_err("Error parsing thread id string\n");
                        return -EINVAL;
                }
        }

        return 0;
}
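
/*
 * Live mode: set up the raw_syscalls (and optionally sched_stat_runtime)
 * tracepoints on an evlist, start the workload if one was given, then keep
 * reading the mmap'ed ring buffers and dispatching samples to the handlers
 * until the workload exits or the user interrupts us.
 */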
static int trace__run(struct trace *trace, int argc, const char **argv)
{
        struct perf_evlist *evlist = perf_evlist__new();
        struct perf_evsel *evsel;
        int err = -1, i;
        unsigned long before;
        const bool forks = argc > 0;

        if (evlist == NULL) {
                fprintf(trace->output, "Not enough memory to run!\n");
                goto out;
        }

        if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
            perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
                fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
                goto out_delete_evlist;
        }

        if (trace->sched &&
            perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
                                   trace__sched_stat_runtime)) {
                fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
                goto out_delete_evlist;
        }

        err = perf_evlist__create_maps(evlist, &trace->opts.target);
        if (err < 0) {
                fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
                goto out_delete_evlist;
        }

        err = trace__symbols_init(trace, evlist);
        if (err < 0) {
                fprintf(trace->output, "Problems initializing symbol libraries!\n");
                goto out_delete_maps;
        }

        perf_evlist__config(evlist, &trace->opts);

        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);

        if (forks) {
                err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
                                                    argv, false, false);
                if (err < 0) {
                        fprintf(trace->output, "Couldn't run the workload!\n");
                        goto out_delete_maps;
                }
        }

        err = perf_evlist__open(evlist);
        if (err < 0) {
                fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
                goto out_delete_maps;
        }

        err = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (err < 0) {
                fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
                goto out_close_evlist;
        }

        perf_evlist__enable(evlist);

        if (forks)
                perf_evlist__start_workload(evlist);

        trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
        before = trace->nr_events;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;

                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        const u32 type = event->header.type;
                        tracepoint_handler handler;
                        struct perf_sample sample;

                        ++trace->nr_events;

                        err = perf_evlist__parse_sample(evlist, event, &sample);
                        if (err) {
                                fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
                                continue;
                        }

                        if (trace->base_time == 0)
                                trace->base_time = sample.time;

                        if (type != PERF_RECORD_SAMPLE) {
                                trace__process_event(trace, &trace->host, event);
                                continue;
                        }

                        evsel = perf_evlist__id2evsel(evlist, sample.id);
                        if (evsel == NULL) {
                                fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
                                continue;
                        }

                        if (sample.raw_data == NULL) {
                                fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
                                        perf_evsel__name(evsel), sample.tid,
                                        sample.cpu, sample.raw_size);
                                continue;
                        }

                        handler = evsel->handler.func;
                        handler(trace, evsel, &sample);
                }
        }

        if (trace->nr_events == before) {
                if (done)
                        goto out_unmap_evlist;

                poll(evlist->pollfd, evlist->nr_fds, -1);
        }

        if (done)
                perf_evlist__disable(evlist);

        goto again;

out_unmap_evlist:
        perf_evlist__munmap(evlist);
out_close_evlist:
        perf_evlist__close(evlist);
out_delete_maps:
        perf_evlist__delete_maps(evlist);
out_delete_evlist:
        perf_evlist__delete(evlist);
out:
        return err;
}
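
/*
 * Replay mode (-i): process a previously recorded perf.data file through
 * the same sys_enter/sys_exit handlers instead of tracing live.
 */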
static int trace__replay(struct trace *trace)
{
        const struct perf_evsel_str_handler handlers[] = {
                { "raw_syscalls:sys_enter", trace__sys_enter, },
                { "raw_syscalls:sys_exit",  trace__sys_exit, },
        };

        struct perf_session *session;
        int err = -1;

        trace->tool.sample        = trace__process_sample;
        trace->tool.mmap          = perf_event__process_mmap;
        trace->tool.comm          = perf_event__process_comm;
        trace->tool.exit          = perf_event__process_exit;
        trace->tool.fork          = perf_event__process_fork;
        trace->tool.attr          = perf_event__process_attr;
        trace->tool.tracing_data  = perf_event__process_tracing_data;
        trace->tool.build_id      = perf_event__process_build_id;

        trace->tool.ordered_samples = true;
        trace->tool.ordering_requires_timestamps = true;

        /* add tid to output */
        trace->multiple_threads = true;

        if (symbol__init() < 0)
                return -1;

        session = perf_session__new(input_name, O_RDONLY, 0, false,
                                    &trace->tool);
        if (session == NULL)
                return -ENOMEM;

        err = perf_session__set_tracepoints_handlers(session, handlers);
        if (err)
                goto out;

        if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
                pr_err("Data file does not have raw_syscalls:sys_enter events\n");
                goto out;
        }

        if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
                pr_err("Data file does not have raw_syscalls:sys_exit events\n");
                goto out;
        }

        err = parse_target_str(trace);
        if (err != 0)
                goto out;

        setup_pager();

        err = perf_session__process_events(session, &trace->tool);
        if (err)
                pr_err("Failed to process events, error %d", err);

out:
        perf_session__delete(session);

        return err;
}

static size_t trace__fprintf_threads_header(FILE *fp)
{
        size_t printed;

        printed  = fprintf(fp, "\n _____________________________________________________________________\n");
        printed += fprintf(fp, " __) Summary of events (__\n\n");
        printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
        printed += fprintf(fp, " _____________________________________________________________________\n\n");

        return printed;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
        size_t printed = trace__fprintf_threads_header(fp);
        struct rb_node *nd;

        for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
                struct thread *thread = rb_entry(nd, struct thread, rb_node);
                struct thread_trace *ttrace = thread->priv;
                const char *color;
                double ratio;

                if (ttrace == NULL)
                        continue;

                ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

                color = PERF_COLOR_NORMAL;
                if (ratio > 50.0)
                        color = PERF_COLOR_RED;
                else if (ratio > 25.0)
                        color = PERF_COLOR_GREEN;
                else if (ratio > 5.0)
                        color = PERF_COLOR_YELLOW;

                printed += color_fprintf(fp, color, "%20s", thread->comm);
                printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
                printed += color_fprintf(fp, color, "%5.1f%%", ratio);
                printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
        }

        return printed;
}

static int trace__set_duration(const struct option *opt, const char *str,
                               int unset __maybe_unused)
{
        struct trace *trace = opt->value;

        trace->duration_filter = atof(str);
        return 0;
}

static int trace__open_output(struct trace *trace, const char *filename)
{
        struct stat st;

        if (!stat(filename, &st) && st.st_size) {
                char oldname[PATH_MAX];

                scnprintf(oldname, sizeof(oldname), "%s.old", filename);
                unlink(oldname);
                rename(filename, oldname);
        }

        trace->output = fopen(filename, "w");

        return trace->output == NULL ? -errno : 0;
}
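
/*
 * Entry point for 'perf trace': parse the command line, set up the output
 * file and event qualifier, validate the target, then either replay a
 * perf.data file (-i) or trace live.
 */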
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
        const char * const trace_usage[] = {
                "perf trace [<options>] [<command>]",
                "perf trace [<options>] -- <command> [<options>]",
                NULL
        };
        struct trace trace = {
                .audit_machine = audit_detect_machine(),
                .syscalls = {
                        .max = -1,
                },
                .opts = {
                        .target = {
                                .uid       = UINT_MAX,
                                .uses_mmap = true,
                        },
                        .user_freq     = UINT_MAX,
                        .user_interval = ULLONG_MAX,
                        .no_delay      = true,
                        .mmap_pages    = 1024,
                },
                .output = stdout,
        };
        const char *output_name = NULL;
        const char *ev_qualifier_str = NULL;
        const struct option trace_options[] = {
                OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
                           "list of events to trace"),
                OPT_STRING('o', "output", &output_name, "file", "output file name"),
                OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
                OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
                           "trace events on existing process id"),
                OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
                           "trace events on existing thread id"),
                OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
                            "system-wide collection from all CPUs"),
                OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
                           "list of cpus to monitor"),
                OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
                            "child tasks do not inherit counters"),
                OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
                             "number of mmap data pages"),
                OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
                           "user to profile"),
                OPT_CALLBACK(0, "duration", &trace, "float",
                             "show only events with duration > N.M ms",
                             trace__set_duration),
                OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
                OPT_INCR('v', "verbose", &verbose, "be more verbose"),
                OPT_END()
        };
        int err;
        char bf[BUFSIZ];

        argc = parse_options(argc, argv, trace_options, trace_usage, 0);

        if (output_name != NULL) {
                err = trace__open_output(&trace, output_name);
                if (err < 0) {
                        perror("failed to create output file");
                        goto out;
                }
        }

        if (ev_qualifier_str != NULL) {
                const char *s = ev_qualifier_str;

                trace.not_ev_qualifier = *s == '!';
                if (trace.not_ev_qualifier)
                        ++s;
                trace.ev_qualifier = strlist__new(true, s);
                if (trace.ev_qualifier == NULL) {
                        fputs("Not enough memory to parse event qualifier",
                              trace.output);
                        err = -ENOMEM;
                        goto out_close;
                }
        }

        err = perf_target__validate(&trace.opts.target);
        if (err) {
                perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                fprintf(trace.output, "%s", bf);
                goto out_close;
        }

        err = perf_target__parse_uid(&trace.opts.target);
        if (err) {
                perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                fprintf(trace.output, "%s", bf);
                goto out_close;
        }

        if (!argc && perf_target__none(&trace.opts.target))
                trace.opts.target.system_wide = true;

        if (input_name)
                err = trace__replay(&trace);
        else
                err = trace__run(&trace, argc, argv);

        if (trace.sched && !err)
                trace__fprintf_thread_summary(&trace, trace.output);

out_close:
        if (output_name != NULL)
                fclose(trace.output);
out:
        return err;
}