builtin-trace.c

#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/thread_map.h"
#include "event-parse.h"

#include <libaudit.h>
#include <stdlib.h>
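
/*
 * Per-syscall formatting hints: errmsg decodes a negative return as an errno,
 * timeout makes a zero return print as "Timeout", and alias gives the
 * tracepoint name to try when sys_enter_<name> does not exist. The table is
 * kept sorted by name so syscall_fmt__find() can bsearch() it.
 */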
static struct syscall_fmt {
	const char *name;
	const char *alias;
	bool errmsg;
	bool timeout;
} syscall_fmts[] = {
	{ .name = "access",     .errmsg = true, },
	{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name = "fstat",      .errmsg = true, .alias = "newfstat", },
	{ .name = "fstatat",    .errmsg = true, .alias = "newfstatat", },
	{ .name = "futex",      .errmsg = true, },
	{ .name = "open",       .errmsg = true, },
	{ .name = "poll",       .errmsg = true, .timeout = true, },
	{ .name = "ppoll",      .errmsg = true, .timeout = true, },
	{ .name = "read",       .errmsg = true, },
	{ .name = "recvfrom",   .errmsg = true, },
	{ .name = "select",     .errmsg = true, .timeout = true, },
	{ .name = "socket",     .errmsg = true, },
	{ .name = "stat",       .errmsg = true, .alias = "newstat", },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}
struct syscall {
	struct event_format *tp_format;
	const char *name;
	struct syscall_fmt *fmt;
};
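
/*
 * Print a syscall duration in milliseconds, color coded by magnitude:
 * red for >= 1 ms, yellow for >= 0.01 ms, default color otherwise.
 */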
static size_t fprintf_duration(unsigned long t, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);

	return printed + fprintf(fp, "): ");
}
struct thread_trace {
	u64 entry_time;
	u64 exit_time;
	bool entry_pending;
	char *entry_str;
};

static struct thread_trace *thread_trace__new(void)
{
	return zalloc(sizeof(struct thread_trace));
}
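
/*
 * Lazily allocate the per-thread trace state, stashing it in thread->priv.
 * On allocation failure a warning is printed and the caller drops the sample.
 */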
static struct thread_trace *thread__trace(struct thread *thread)
{
	if (thread == NULL)
		goto fail;

	if (thread->priv == NULL)
		thread->priv = thread_trace__new();

	if (thread->priv == NULL)
		goto fail;

	return thread->priv;
fail:
	color_fprintf(stdout, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
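
/*
 * Global tool state: the audit machine type used to map syscall ids to names,
 * a syscall table indexed by id and grown on demand, the record options, the
 * host machine representation, the timestamp origin and the duration filter.
 */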
struct trace {
	int audit_machine;
	struct {
		int max;
		struct syscall *table;
	} syscalls;
	struct perf_record_opts opts;
	struct machine host;
	u64 base_time;
	bool multiple_threads;
	double duration_filter;
};
static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);

	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads)
		printed += fprintf(fp, "%d ", thread->pid);

	return printed;
}
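
/*
 * Handle non-sample events read from the mmap ring: LOST events get a loud
 * red warning, everything else is forwarded to the generic machine event
 * processing so thread and mmap state stays up to date.
 */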
static int trace__process_event(struct machine *machine, union perf_event *event)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(stdout, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event);
		break;
	default:
		ret = machine__process_event(machine, event);
		break;
	}

	return ret;
}
static int trace__tool_process(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return trace__process_event(machine, event);
}
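
/*
 * Set up the symbol machinery and the host machine representation, then
 * synthesize events for the threads that already exist: just the ones in the
 * target thread map when tracing specific tasks, or all threads otherwise.
 */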
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init();

	if (err)
		return err;

	machine__init(&trace->host, "", HOST_KERNEL_ID);
	machine__create_kernel_maps(&trace->host);

	if (perf_target__has_task(&trace->opts.target)) {
		err = perf_event__synthesize_thread_map(NULL, evlist->threads,
							trace__tool_process,
							&trace->host);
	} else {
		err = perf_event__synthesize_threads(NULL, trace__tool_process,
						     &trace->host);
	}

	if (err)
		symbol__exit();

	return err;
}
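
/*
 * Fill in the syscall table slot for 'id': resolve the name via libaudit,
 * grow the table if needed, look up the formatting hints and open the
 * syscalls:sys_enter_* tracepoint format, falling back to the alias when
 * the canonical name has no tracepoint of its own.
 */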
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	return sc->tp_format != NULL ? 0 : -1;
}
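
/*
 * Format the raw syscall arguments. When the tracepoint format is known, use
 * its field names (skipping the first field, which carries the syscall
 * number); otherwise fall back to generic "argN" labels for all six slots.
 */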
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned long *args)
{
	int i = 0;
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;

		for (field = sc->tp_format->format.fields->next; field; field = field->next) {
			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: %ld", printed ? ", " : "",
					     field->name, args[i++]);
		}
	} else {
		while (i < 6) {
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);
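
/*
 * Map the "id" field of a raw_syscalls sample to its struct syscall entry,
 * reading the syscall info on first use and complaining when that fails.
 */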
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		printf("Invalid syscall %d id, skipping...\n", id);
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	printf("Problems reading syscall %d", id);
	if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
		printf("(%s)", trace->syscalls.table[id].name);
	puts(" information");
	return NULL;
}
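
/*
 * raw_syscalls:sys_enter handler: format "name(args" into the per-thread
 * entry_str and keep it pending until the matching sys_exit arrives, so
 * entry and exit are printed on a single line. exit and exit_group never
 * return, so they are printed right away (unless a duration filter is set).
 */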
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace = thread__trace(thread);

	if (ttrace == NULL || sc == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		printf("Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
			printf("%-70s\n", ttrace->entry_str);
		}
	} else
		ttrace->entry_pending = true;

	return 0;
}
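
/*
 * raw_syscalls:sys_exit handler: compute the duration from the stored entry
 * time, apply the --duration filter, then print the pending entry string (or
 * a "continued" marker when the entry was not seen) followed by the decoded
 * return value.
 */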
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct thread_trace *ttrace = thread__trace(thread);
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);

	if (ttrace == NULL || sc == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);

	if (ttrace->entry_pending) {
		printf("%-70s", ttrace->entry_str);
	} else {
		printf(" ... [");
		color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
		printf("]: %s()", sc->name);
	}

	if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		printf(") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt && sc->fmt->timeout)
		printf(") = 0 Timeout");
	else
		printf(") = %d", ret);

	putchar('\n');
out:
	ttrace->entry_pending = false;
	return 0;
}
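
/*
 * Main event loop: create an evlist with the two raw_syscalls tracepoints,
 * attach to the target (or fork the workload), mmap the ring buffers and
 * keep draining them until the workload exits or the user interrupts us.
 */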
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		printf("Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		printf("Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		printf("Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		printf("Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config_attrs(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts, argv);
		if (err < 0) {
			printf("Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		printf("Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		printf("Couldn't mmap the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				printf("Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(&trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
				       perf_evsel__name(evsel), sample.tid,
				       sample.cpu, sample.raw_size);
				continue;
			}
			handler = evsel->handler.func;
			handler(trace, evsel, &sample);
		}
	}

	if (nr_events == before) {
		if (done)
			goto out_delete_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}
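
/*
 * Entry point for 'perf trace': parse the options, validate the target
 * (pid/tid/cpu/uid), default to system-wide tracing when no workload or
 * target was given, and run the tracing loop.
 */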
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			.max = -1,
		},
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay      = true,
			.mmap_pages    = 1024,
		},
	};
	const struct option trace_options[] = {
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_END()
	};
	int err;
	char bf[BUFSIZ];

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	err = perf_target__validate(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	return trace__run(&trace, argc, argv);
}