builtin-trace.c

#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/thread_map.h"
#include "event-parse.h"

#include <libaudit.h>
#include <stdlib.h>

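/*
 * Per-syscall output tweaks: an optional tracepoint name alias (e.g. fstat ->
 * newfstat), whether a negative return value should be decoded as an errno,
 * and whether a zero return value means a timeout.
 */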
static struct syscall_fmt {
	const char *name;
	const char *alias;
	bool errmsg;
	bool timeout;
} syscall_fmts[] = {
	{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name = "fstat",	.errmsg = true, .alias = "newfstat", },
	{ .name = "fstatat",	.errmsg = true, .alias = "newfstatat", },
	{ .name = "futex",	.errmsg = true, },
	{ .name = "poll",	.errmsg = true, .timeout = true, },
	{ .name = "ppoll",	.errmsg = true, .timeout = true, },
	{ .name = "read",	.errmsg = true, },
	{ .name = "recvfrom",	.errmsg = true, },
	{ .name = "select",	.errmsg = true, .timeout = true, },
	{ .name = "stat",	.errmsg = true, .alias = "newstat", },
};

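/* Lookups use bsearch(), so syscall_fmts[] above must stay sorted by name. */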
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
	struct event_format *tp_format;
	const char	    *name;
	struct syscall_fmt  *fmt;
};

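/* Print the syscall duration in ms, colored red/yellow/plain by magnitude. */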
static size_t fprintf_duration(unsigned long t, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);

	return printed + fprintf(fp, "): ");
}

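/*
 * Per-thread state, hung off thread->priv: enter/exit timestamps and the
 * pending, already formatted entry line.
 */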
struct thread_trace {
	u64  entry_time;
	u64  exit_time;
	bool entry_pending;
	char *entry_str;
};

static struct thread_trace *thread_trace__new(void)
{
	return zalloc(sizeof(struct thread_trace));
}

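/* Lazily allocate the per-thread state; warn and drop the sample on failure. */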
static struct thread_trace *thread__trace(struct thread *thread)
{
	if (thread == NULL)
		goto fail;

	if (thread->priv == NULL)
		thread->priv = thread_trace__new();

	if (thread->priv == NULL)
		goto fail;

	return thread->priv;
fail:
	color_fprintf(stdout, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

struct trace {
	int			audit_machine;
	struct {
		int		max;
		struct syscall  *table;
	} syscalls;
	struct perf_record_opts opts;
	struct machine		host;
	u64			base_time;
	bool			multiple_threads;
};

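/* Timestamps are printed in ms, relative to the first sample seen. */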
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
	return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

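/*
 * Line prefix: timestamp, duration and, when more than one thread is being
 * traced, the thread id.
 */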
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);

	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads)
		printed += fprintf(fp, "%d ", thread->pid);

	return printed;
}

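/*
 * Handle non-sample events: warn about lost events, then let the machine
 * layer keep its thread/map state up to date.
 */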
static int trace__process_event(struct machine *machine, union perf_event *event)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(stdout, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event);
	default:
		ret = machine__process_event(machine, event);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return trace__process_event(machine, event);
}

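/*
 * Set up the host machine and synthesize already-running threads so that
 * samples from an existing target can be resolved.
 */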
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init();

	if (err)
		return err;

	machine__init(&trace->host, "", HOST_KERNEL_ID);
	machine__create_kernel_maps(&trace->host);

	if (perf_target__has_task(&trace->opts.target)) {
		err = perf_event__synthesize_thread_map(NULL, evlist->threads,
							trace__tool_process,
							&trace->host);
	} else {
		err = perf_event__synthesize_threads(NULL, trace__tool_process,
						     &trace->host);
	}

	if (err)
		symbol__exit();

	return err;
}

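/*
 * Grow the syscall table up to 'id' if needed, resolve the syscall name via
 * libaudit and grab its sys_enter_* tracepoint format, falling back to the
 * alias (e.g. sys_enter_newstat) when the obvious name doesn't exist.
 */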
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	return sc->tp_format != NULL ? 0 : -1;
}

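/*
 * Format the entry arguments, using the tracepoint field names when the
 * format is known, otherwise as anonymous arg0..arg5.
 */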
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned long *args)
{
	int i = 0;
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;

		for (field = sc->tp_format->format.fields->next; field; field = field->next) {
			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: %ld", printed ? ", " : "",
					     field->name, args[i++]);
		}
	} else {
		while (i < 6) {
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);

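/*
 * Map a raw_syscalls sample to its struct syscall, reading the syscall
 * information on first use.
 */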
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		printf("Invalid syscall %d id, skipping...\n", id);
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	printf("Problems reading syscall %d information\n", id);
	return NULL;
}

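/*
 * sys_enter handler: format "name(args" into the per-thread entry buffer and
 * defer printing until sys_exit, except for exit/exit_group which never
 * return and are printed right away.
 */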
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace = thread__trace(thread);

	if (ttrace == NULL || sc == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		printf("Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
		printf("%-70s\n", ttrace->entry_str);
	} else
		ttrace->entry_pending = true;

	return 0;
}

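/*
 * sys_exit handler: print the pending entry (or a "continued" marker when the
 * entry was not seen), the duration, and the return value, decoded as an
 * errno name or "Timeout" when the syscall_fmt asks for it.
 */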
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct thread_trace *ttrace = thread__trace(thread);
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);

	if (ttrace == NULL || sc == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;
	ttrace->exit_time = sample->time;

	if (ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);

	if (ttrace->entry_pending) {
		printf("%-70s", ttrace->entry_str);
	} else {
		printf(" ... [");
		color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
		printf("]: %s()", sc->name);
	}

	if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		printf(") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt && sc->fmt->timeout)
		printf(") = 0 Timeout");
	else
		printf(") = %d", ret);

	putchar('\n');
	ttrace->entry_pending = false;

	return 0;
}

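/*
 * Main event loop: set up the raw_syscalls:sys_enter/sys_exit tracepoints,
 * mmap the ring buffers and consume samples until the workload exits or the
 * user interrupts us.
 */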
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		printf("Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		printf("Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		printf("Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		printf("Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config_attrs(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts, argv);
		if (err < 0) {
			printf("Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		printf("Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		printf("Couldn't mmap the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				printf("Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(&trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
				       perf_evsel__name(evsel), sample.tid,
				       sample.cpu, sample.raw_size);
				continue;
			}

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);
		}
	}

	if (nr_events == before) {
		if (done)
			goto out_delete_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}

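/* 'perf trace' entry point: parse options, validate the target and run. */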
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			.max = -1,
		},
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay      = true,
			.mmap_pages    = 1024,
		},
	};
	const struct option trace_options[] = {
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_END()
	};
	int err;
	char bf[BUFSIZ];

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	err = perf_target__validate(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	return trace__run(&trace, argc, argv);
}