builtin-trace.c

#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/thread_map.h"

#include <libaudit.h>
#include <stdlib.h>

static size_t syscall_arg__scnprintf_hex(char *bf, size_t size, unsigned long arg)
{
        return scnprintf(bf, size, "%#lx", arg);
}

#define SCA_HEX syscall_arg__scnprintf_hex
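
/*
 * Per-syscall output tweaks: an optional alias for the tracepoint name,
 * per-argument pretty-printers, and flags controlling how the return value
 * is rendered (errno message, "Timeout", hex).
 */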
static struct syscall_fmt {
        const char *name;
        const char *alias;
        size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg);
        bool errmsg;
        bool timeout;
        bool hexret;
} syscall_fmts[] = {
        /* Keep this table sorted by name: syscall_fmt__find() uses bsearch(). */
        { .name = "access", .errmsg = true, },
        { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
        { .name = "brk", .hexret = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
        { .name = "connect", .errmsg = true, },
        { .name = "fstat", .errmsg = true, .alias = "newfstat", },
        { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
        { .name = "futex", .errmsg = true, },
        { .name = "ioctl", .errmsg = true,
          .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
        { .name = "lstat", .errmsg = true, .alias = "newlstat", },
        { .name = "mmap", .hexret = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name = "mprotect", .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name = "mremap", .hexret = true, },
        { .name = "munmap", .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name = "open", .errmsg = true, },
        { .name = "poll", .errmsg = true, .timeout = true, },
        { .name = "ppoll", .errmsg = true, .timeout = true, },
        { .name = "pread", .errmsg = true, .alias = "pread64", },
        { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
        { .name = "read", .errmsg = true, },
        { .name = "recvfrom", .errmsg = true, },
        { .name = "select", .errmsg = true, .timeout = true, },
        { .name = "socket", .errmsg = true, },
        { .name = "stat", .errmsg = true, .alias = "newstat", },
        { .name = "uname", .errmsg = true, .alias = "newuname", },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
        const struct syscall_fmt *fmt = fmtp;
        return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
        const int nmemb = ARRAY_SIZE(syscall_fmts);
        return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
        struct event_format *tp_format;
        const char *name;
        bool filtered;
        struct syscall_fmt *fmt;
        size_t (**arg_scnprintf)(char *bf, size_t size, unsigned long arg);
};

static size_t fprintf_duration(unsigned long t, FILE *fp)
{
        double duration = (double)t / NSEC_PER_MSEC;
        size_t printed = fprintf(fp, "(");

        if (duration >= 1.0)
                printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
        else if (duration >= 0.01)
                printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
        else
                printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
        return printed + fprintf(fp, "): ");
}

struct thread_trace {
        u64 entry_time;
        u64 exit_time;
        bool entry_pending;
        unsigned long nr_events;
        char *entry_str;
        double runtime_ms;
};

static struct thread_trace *thread_trace__new(void)
{
        return zalloc(sizeof(struct thread_trace));
}
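
/*
 * Lazily allocate the per-thread trace state hanging off thread->priv and
 * bump its event count on every call; warns and returns NULL on allocation
 * failure so callers can drop the sample.
 */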
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
        struct thread_trace *ttrace;

        if (thread == NULL)
                goto fail;

        if (thread->priv == NULL)
                thread->priv = thread_trace__new();

        if (thread->priv == NULL)
                goto fail;

        ttrace = thread->priv;
        ++ttrace->nr_events;

        return ttrace;
fail:
        color_fprintf(fp, PERF_COLOR_RED,
                      "WARNING: not enough memory, dropping samples!\n");
        return NULL;
}
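
/*
 * Per-session state: the tool callbacks, the lazily grown syscall table
 * indexed by syscall id, the record options, the host machine, the output
 * stream and the event/duration filters.
 */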
struct trace {
        struct perf_tool tool;
        int audit_machine;
        struct {
                int max;
                struct syscall *table;
        } syscalls;
        struct perf_record_opts opts;
        struct machine host;
        u64 base_time;
        FILE *output;
        unsigned long nr_events;
        struct strlist *ev_qualifier;
        bool not_ev_qualifier;
        bool sched;
        bool multiple_threads;
        double duration_filter;
        double runtime_ms;
};

static bool trace__filter_duration(struct trace *trace, double t)
{
        return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
        double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
        return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
        done = true;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
                                        u64 duration, u64 tstamp, FILE *fp)
{
        size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
        printed += fprintf_duration(duration, fp);

        if (trace->multiple_threads)
                printed += fprintf(fp, "%d ", thread->tid);

        return printed;
}
static int trace__process_event(struct trace *trace, struct machine *machine,
                                union perf_event *event)
{
        int ret = 0;

        switch (event->header.type) {
        case PERF_RECORD_LOST:
                color_fprintf(trace->output, PERF_COLOR_RED,
                              "LOST %" PRIu64 " events!\n", event->lost.lost);
                ret = machine__process_lost_event(machine, event);
                break;
        default:
                ret = machine__process_event(machine, event);
                break;
        }

        return ret;
}
static int trace__tool_process(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
{
        struct trace *trace = container_of(tool, struct trace, tool);
        return trace__process_event(trace, machine, event);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
        int err = symbol__init();

        if (err)
                return err;

        machine__init(&trace->host, "", HOST_KERNEL_ID);
        machine__create_kernel_maps(&trace->host);

        if (perf_target__has_task(&trace->opts.target)) {
                err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
                                                        trace__tool_process,
                                                        &trace->host);
        } else {
                err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
                                                     &trace->host);
        }

        if (err)
                symbol__exit();

        return err;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
        struct format_field *field;
        int idx = 0;

        sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
        if (sc->arg_scnprintf == NULL)
                return -1;

        for (field = sc->tp_format->format.fields->next; field; field = field->next) {
                if (sc->fmt && sc->fmt->arg_scnprintf[idx])
                        sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
                else if (field->flags & FIELD_IS_POINTER)
                        sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
                ++idx;
        }

        return 0;
}
static int trace__read_syscall_info(struct trace *trace, int id)
{
        char tp_name[128];
        struct syscall *sc;
        const char *name = audit_syscall_to_name(id, trace->audit_machine);

        if (name == NULL)
                return -1;

        if (id > trace->syscalls.max) {
                struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

                if (nsyscalls == NULL)
                        return -1;

                if (trace->syscalls.max != -1) {
                        memset(nsyscalls + trace->syscalls.max + 1, 0,
                               (id - trace->syscalls.max) * sizeof(*sc));
                } else {
                        memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
                }

                trace->syscalls.table = nsyscalls;
                trace->syscalls.max = id;
        }

        sc = trace->syscalls.table + id;
        sc->name = name;

        if (trace->ev_qualifier) {
                bool in = strlist__find(trace->ev_qualifier, name) != NULL;

                if (!(in ^ trace->not_ev_qualifier)) {
                        sc->filtered = true;
                        /*
                         * No need to read the tracepoint information, since this
                         * syscall will be filtered out anyway.
                         */
                        return 0;
                }
        }

        sc->fmt = syscall_fmt__find(sc->name);

        snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
        sc->tp_format = event_format__new("syscalls", tp_name);

        if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
                snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
                sc->tp_format = event_format__new("syscalls", tp_name);
        }

        if (sc->tp_format == NULL)
                return -1;

        return syscall__set_arg_fmts(sc);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
                                      unsigned long *args)
{
        int i = 0;
        size_t printed = 0;

        if (sc->tp_format != NULL) {
                struct format_field *field;

                for (field = sc->tp_format->format.fields->next; field; field = field->next) {
                        printed += scnprintf(bf + printed, size - printed,
                                             "%s%s: ", printed ? ", " : "", field->name);

                        if (sc->arg_scnprintf && sc->arg_scnprintf[i])
                                printed += sc->arg_scnprintf[i](bf + printed, size - printed, args[i]);
                        else
                                printed += scnprintf(bf + printed, size - printed,
                                                     "%ld", args[i]);
                        ++i;
                }
        } else {
                while (i < 6) {
                        printed += scnprintf(bf + printed, size - printed,
                                             "%sarg%d: %ld",
                                             printed ? ", " : "", i, args[i]);
                        ++i;
                }
        }

        return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
                                  struct perf_sample *sample);
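
/*
 * Map a raw_syscalls sample to its struct syscall, reading the audit name
 * and tracepoint format on first use; returns NULL (with a diagnostic at
 * higher verbosity) when the id is invalid or its info can't be read.
 */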
static struct syscall *trace__syscall_info(struct trace *trace,
                                           struct perf_evsel *evsel,
                                           struct perf_sample *sample)
{
        int id = perf_evsel__intval(evsel, sample, "id");

        if (id < 0) {
                /*
                 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
                 * before that, leaving at a higher verbosity level till that is
                 * explained. Reproduced with plain ftrace with:
                 *
                 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
                 * grep "NR -1 " /t/trace_pipe
                 *
                 * After generating some load on the machine.
                 */
                if (verbose > 1) {
                        static u64 n;
                        fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
                                id, perf_evsel__name(evsel), ++n);
                }
                return NULL;
        }

        if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
            trace__read_syscall_info(trace, id))
                goto out_cant_read;

        if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
                goto out_cant_read;

        return &trace->syscalls.table[id];

out_cant_read:
        if (verbose) {
                fprintf(trace->output, "Problems reading syscall %d", id);
                if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
                        fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
                fputs(" information\n", trace->output);
        }
        return NULL;
}
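
/*
 * raw_syscalls:sys_enter handler: formats "name(args" into the per-thread
 * entry buffer and defers printing until sys_exit, so the duration can be
 * prepended; exit/exit_group never return, so they are printed right away
 * unless a duration filter is in effect.
 */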
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
                            struct perf_sample *sample)
{
        char *msg;
        void *args;
        size_t printed = 0;
        struct thread *thread;
        struct syscall *sc = trace__syscall_info(trace, evsel, sample);
        struct thread_trace *ttrace;

        if (sc == NULL)
                return -1;

        if (sc->filtered)
                return 0;

        thread = machine__findnew_thread(&trace->host, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
                return -1;

        args = perf_evsel__rawptr(evsel, sample, "args");
        if (args == NULL) {
                fprintf(trace->output, "Problems reading syscall arguments\n");
                return -1;
        }

        ttrace = thread->priv;

        if (ttrace->entry_str == NULL) {
                ttrace->entry_str = malloc(1024);
                if (!ttrace->entry_str)
                        return -1;
        }

        ttrace->entry_time = sample->time;
        msg = ttrace->entry_str;
        printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

        printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

        if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
                if (!trace->duration_filter) {
                        trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
                        fprintf(trace->output, "%-70s\n", ttrace->entry_str);
                }
        } else
                ttrace->entry_pending = true;

        return 0;
}
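
/*
 * raw_syscalls:sys_exit handler: computes the syscall duration, applies the
 * duration filter, flushes the pending entry string (or a "continued"
 * marker) and decodes the return value according to the syscall_fmt flags.
 */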
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
                           struct perf_sample *sample)
{
        int ret;
        u64 duration = 0;
        struct thread *thread;
        struct syscall *sc = trace__syscall_info(trace, evsel, sample);
        struct thread_trace *ttrace;

        if (sc == NULL)
                return -1;

        if (sc->filtered)
                return 0;

        thread = machine__findnew_thread(&trace->host, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
                return -1;

        ret = perf_evsel__intval(evsel, sample, "ret");

        ttrace = thread->priv;

        ttrace->exit_time = sample->time;

        if (ttrace->entry_time) {
                duration = sample->time - ttrace->entry_time;
                if (trace__filter_duration(trace, duration))
                        goto out;
        } else if (trace->duration_filter)
                goto out;

        trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

        if (ttrace->entry_pending) {
                fprintf(trace->output, "%-70s", ttrace->entry_str);
        } else {
                fprintf(trace->output, " ... [");
                color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
                fprintf(trace->output, "]: %s()", sc->name);
        }

        if (sc->fmt == NULL) {
signed_print:
                fprintf(trace->output, ") = %d", ret);
        } else if (ret < 0 && sc->fmt->errmsg) {
                char bf[256];
                const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
                           *e = audit_errno_to_name(-ret);

                fprintf(trace->output, ") = -1 %s %s", e, emsg);
        } else if (ret == 0 && sc->fmt->timeout)
                fprintf(trace->output, ") = 0 Timeout");
        else if (sc->fmt->hexret)
                fprintf(trace->output, ") = %#x", ret);
        else
                goto signed_print;

        fputc('\n', trace->output);
out:
        ttrace->entry_pending = false;

        return 0;
}
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
                                     struct perf_sample *sample)
{
        u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
        double runtime_ms = (double)runtime / NSEC_PER_MSEC;
        struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
        struct thread_trace *ttrace = thread__trace(thread, trace->output);

        if (ttrace == NULL)
                goto out_dump;

        ttrace->runtime_ms += runtime_ms;
        trace->runtime_ms += runtime_ms;
        return 0;

out_dump:
        fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
                evsel->name,
                perf_evsel__strval(evsel, sample, "comm"),
                (pid_t)perf_evsel__intval(evsel, sample, "pid"),
                runtime,
                perf_evsel__intval(evsel, sample, "vruntime"));
        return 0;
}
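
/*
 * Main event loop: builds the evlist (raw_syscalls sys_enter/sys_exit, plus
 * sched_stat_runtime when --sched is given), creates the thread/cpu maps,
 * opens and mmaps the events, optionally forks the workload, then consumes
 * samples, dispatching each one to the handler stored in evsel->handler.func,
 * until the workload exits or SIGINT is received.
 */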
static int trace__run(struct trace *trace, int argc, const char **argv)
{
        struct perf_evlist *evlist = perf_evlist__new();
        struct perf_evsel *evsel;
        int err = -1, i;
        unsigned long before;
        const bool forks = argc > 0;

        if (evlist == NULL) {
                fprintf(trace->output, "Not enough memory to run!\n");
                goto out;
        }

        if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
            perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
                fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
                goto out_delete_evlist;
        }

        if (trace->sched &&
            perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
                                   trace__sched_stat_runtime)) {
                fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
                goto out_delete_evlist;
        }

        err = perf_evlist__create_maps(evlist, &trace->opts.target);
        if (err < 0) {
                fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
                goto out_delete_evlist;
        }

        err = trace__symbols_init(trace, evlist);
        if (err < 0) {
                fprintf(trace->output, "Problems initializing symbol libraries!\n");
                goto out_delete_maps;
        }

        perf_evlist__config(evlist, &trace->opts);

        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);

        if (forks) {
                err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
                                                    argv, false, false);
                if (err < 0) {
                        fprintf(trace->output, "Couldn't run the workload!\n");
                        goto out_delete_maps;
                }
        }

        err = perf_evlist__open(evlist);
        if (err < 0) {
                fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
                goto out_delete_maps;
        }

        err = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (err < 0) {
                fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
                goto out_close_evlist;
        }

        perf_evlist__enable(evlist);

        if (forks)
                perf_evlist__start_workload(evlist);

        trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
        before = trace->nr_events;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;

                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        const u32 type = event->header.type;
                        tracepoint_handler handler;
                        struct perf_sample sample;

                        ++trace->nr_events;

                        err = perf_evlist__parse_sample(evlist, event, &sample);
                        if (err) {
                                fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
                                continue;
                        }

                        if (trace->base_time == 0)
                                trace->base_time = sample.time;

                        if (type != PERF_RECORD_SAMPLE) {
                                trace__process_event(trace, &trace->host, event);
                                continue;
                        }

                        evsel = perf_evlist__id2evsel(evlist, sample.id);
                        if (evsel == NULL) {
                                fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
                                continue;
                        }

                        if (sample.raw_data == NULL) {
                                fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
                                        perf_evsel__name(evsel), sample.tid,
                                        sample.cpu, sample.raw_size);
                                continue;
                        }

                        handler = evsel->handler.func;
                        handler(trace, evsel, &sample);
                }
        }

        if (trace->nr_events == before) {
                if (done)
                        goto out_unmap_evlist;

                poll(evlist->pollfd, evlist->nr_fds, -1);
        }

        if (done)
                perf_evlist__disable(evlist);

        goto again;

out_unmap_evlist:
        perf_evlist__munmap(evlist);
out_close_evlist:
        perf_evlist__close(evlist);
out_delete_maps:
        perf_evlist__delete_maps(evlist);
out_delete_evlist:
        perf_evlist__delete(evlist);
out:
        return err;
}
static size_t trace__fprintf_threads_header(FILE *fp)
{
        size_t printed;

        printed  = fprintf(fp, "\n _____________________________________________________________________\n");
        printed += fprintf(fp, " __) Summary of events (__\n\n");
        printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
        printed += fprintf(fp, " _____________________________________________________________________\n\n");

        return printed;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
        size_t printed = trace__fprintf_threads_header(fp);
        struct rb_node *nd;

        for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
                struct thread *thread = rb_entry(nd, struct thread, rb_node);
                struct thread_trace *ttrace = thread->priv;
                const char *color;
                double ratio;

                if (ttrace == NULL)
                        continue;

                ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

                color = PERF_COLOR_NORMAL;
                if (ratio > 50.0)
                        color = PERF_COLOR_RED;
                else if (ratio > 25.0)
                        color = PERF_COLOR_GREEN;
                else if (ratio > 5.0)
                        color = PERF_COLOR_YELLOW;

                printed += color_fprintf(fp, color, "%20s", thread->comm);
                printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
                printed += color_fprintf(fp, color, "%5.1f%%", ratio);
                printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
        }

        return printed;
}

static int trace__set_duration(const struct option *opt, const char *str,
                               int unset __maybe_unused)
{
        struct trace *trace = opt->value;

        trace->duration_filter = atof(str);
        return 0;
}
static int trace__open_output(struct trace *trace, const char *filename)
{
        struct stat st;

        if (!stat(filename, &st) && st.st_size) {
                char oldname[PATH_MAX];

                scnprintf(oldname, sizeof(oldname), "%s.old", filename);
                unlink(oldname);
                rename(filename, oldname);
        }

        trace->output = fopen(filename, "w");

        return trace->output == NULL ? -errno : 0;
}
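
/*
 * 'perf trace' entry point: parses the command line, redirects the output
 * if -o was given, builds the event qualifier list from -e (a leading '!'
 * negates it), validates the target, runs the trace and prints the
 * per-thread summary when --sched was requested.
 */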
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
        const char * const trace_usage[] = {
                "perf trace [<options>] [<command>]",
                "perf trace [<options>] -- <command> [<options>]",
                NULL
        };
        struct trace trace = {
                .audit_machine = audit_detect_machine(),
                .syscalls = {
                        .max = -1,
                },
                .opts = {
                        .target = {
                                .uid = UINT_MAX,
                                .uses_mmap = true,
                        },
                        .user_freq = UINT_MAX,
                        .user_interval = ULLONG_MAX,
                        .no_delay = true,
                        .mmap_pages = 1024,
                },
                .output = stdout,
        };
        const char *output_name = NULL;
        const char *ev_qualifier_str = NULL;
        const struct option trace_options[] = {
                OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
                           "list of events to trace"),
                OPT_STRING('o', "output", &output_name, "file", "output file name"),
                OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
                           "trace events on existing process id"),
                OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
                           "trace events on existing thread id"),
                OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
                            "system-wide collection from all CPUs"),
                OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
                           "list of cpus to monitor"),
                OPT_BOOLEAN('i', "no-inherit", &trace.opts.no_inherit,
                            "child tasks do not inherit counters"),
                OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
                             "number of mmap data pages"),
                OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
                           "user to profile"),
                OPT_CALLBACK(0, "duration", &trace, "float",
                             "show only events with duration > N.M ms",
                             trace__set_duration),
                OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
                OPT_INCR('v', "verbose", &verbose, "be more verbose"),
                OPT_END()
        };
        int err;
        char bf[BUFSIZ];

        argc = parse_options(argc, argv, trace_options, trace_usage, 0);

        if (output_name != NULL) {
                err = trace__open_output(&trace, output_name);
                if (err < 0) {
                        perror("failed to create output file");
                        goto out;
                }
        }

        if (ev_qualifier_str != NULL) {
                const char *s = ev_qualifier_str;

                trace.not_ev_qualifier = *s == '!';
                if (trace.not_ev_qualifier)
                        ++s;
                trace.ev_qualifier = strlist__new(true, s);
                if (trace.ev_qualifier == NULL) {
                        fputs("Not enough memory to parse event qualifier",
                              trace.output);
                        err = -ENOMEM;
                        goto out_close;
                }
        }

        err = perf_target__validate(&trace.opts.target);
        if (err) {
                perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                fprintf(trace.output, "%s", bf);
                goto out_close;
        }

        err = perf_target__parse_uid(&trace.opts.target);
        if (err) {
                perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                fprintf(trace.output, "%s", bf);
                goto out_close;
        }

        if (!argc && perf_target__none(&trace.opts.target))
                trace.opts.target.system_wide = true;

        err = trace__run(&trace, argc, argv);

        if (trace.sched && !err)
                trace__fprintf_thread_summary(&trace, trace.output);

out_close:
        if (output_name != NULL)
                fclose(trace.output);
out:
        return err;
}