builtin-trace.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686
#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/thread_map.h"
#include <errno.h>
#include <libaudit.h>
#include <stdlib.h>
/*
 * Per-syscall output-formatting hints.
 *
 * NOTE: this table is searched with bsearch() (see syscall_fmt__find()),
 * so entries MUST stay sorted by ->name.
 */
static struct syscall_fmt {
    const char *name;    /* canonical syscall name, the bsearch key */
    const char *alias;   /* alternate tracepoint suffix, e.g. "newstat" for "stat" */
    bool errmsg;         /* on a negative return, print errno name + message */
    bool timeout;        /* a zero return means the call timed out */
} syscall_fmts[] = {
    { .name = "access", .errmsg = true, },
    { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
    { .name = "connect", .errmsg = true, },
    { .name = "fstat", .errmsg = true, .alias = "newfstat", },
    { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
    { .name = "futex", .errmsg = true, },
    { .name = "open", .errmsg = true, },
    { .name = "poll", .errmsg = true, .timeout = true, },
    { .name = "ppoll", .errmsg = true, .timeout = true, },
    { .name = "read", .errmsg = true, },
    { .name = "recvfrom", .errmsg = true, },
    { .name = "select", .errmsg = true, .timeout = true, },
    { .name = "socket", .errmsg = true, },
    { .name = "stat", .errmsg = true, .alias = "newstat", },
};
  32. static int syscall_fmt__cmp(const void *name, const void *fmtp)
  33. {
  34. const struct syscall_fmt *fmt = fmtp;
  35. return strcmp(name, fmt->name);
  36. }
  37. static struct syscall_fmt *syscall_fmt__find(const char *name)
  38. {
  39. const int nmemb = ARRAY_SIZE(syscall_fmts);
  40. return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
  41. }
/* Cached per-syscall-id info, lazily filled by trace__read_syscall_info(). */
struct syscall {
    struct event_format *tp_format;  /* syscalls:sys_enter_<name> tracepoint format */
    const char *name;                /* resolved via audit_syscall_to_name() */
    struct syscall_fmt *fmt;         /* optional formatting hints, may be NULL */
};
  47. static size_t fprintf_duration(unsigned long t, FILE *fp)
  48. {
  49. double duration = (double)t / NSEC_PER_MSEC;
  50. size_t printed = fprintf(fp, "(");
  51. if (duration >= 1.0)
  52. printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
  53. else if (duration >= 0.01)
  54. printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
  55. else
  56. printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
  57. return printed + fprintf(stdout, "): ");
  58. }
/* Per-thread trace state, hung off thread->priv by thread__trace(). */
struct thread_trace {
    u64 entry_time;          /* timestamp of the last sys_enter sample */
    u64 exit_time;           /* timestamp of the last sys_exit sample */
    bool entry_pending;      /* entry_str formatted but not yet printed */
    unsigned long nr_events; /* samples accounted to this thread */
    char *entry_str;         /* lazily malloc'ed 1024-byte format buffer */
    double runtime_ms;       /* accumulated sched_stat_runtime, in ms */
};
  67. static struct thread_trace *thread_trace__new(void)
  68. {
  69. return zalloc(sizeof(struct thread_trace));
  70. }
  71. static struct thread_trace *thread__trace(struct thread *thread)
  72. {
  73. struct thread_trace *ttrace;
  74. if (thread == NULL)
  75. goto fail;
  76. if (thread->priv == NULL)
  77. thread->priv = thread_trace__new();
  78. if (thread->priv == NULL)
  79. goto fail;
  80. ttrace = thread->priv;
  81. ++ttrace->nr_events;
  82. return ttrace;
  83. fail:
  84. color_fprintf(stdout, PERF_COLOR_RED,
  85. "WARNING: not enough memory, dropping samples!\n");
  86. return NULL;
  87. }
/* Global state for one `perf trace` session. */
struct trace {
    int audit_machine;            /* libaudit machine type for id -> name lookups */
    struct {
        int max;                  /* highest syscall id in table, -1 when empty */
        struct syscall *table;    /* indexed by syscall id, grown lazily */
    } syscalls;
    struct perf_record_opts opts;
    struct machine host;
    u64 base_time;                /* first sample timestamp, anchors relative stamps */
    unsigned long nr_events;      /* total events seen across all threads */
    bool sched;                   /* --sched: also collect sched_stat_runtime */
    bool multiple_threads;        /* print a tid column on each line */
    double duration_filter;       /* --duration threshold in ms, 0 = disabled */
    double runtime_ms;            /* summed on-CPU time for the summary */
};
  103. static bool trace__filter_duration(struct trace *trace, double t)
  104. {
  105. return t < (trace->duration_filter * NSEC_PER_MSEC);
  106. }
  107. static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
  108. {
  109. double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
  110. return fprintf(fp, "%10.3f ", ts);
  111. }
  112. static bool done = false;
  113. static void sig_handler(int sig __maybe_unused)
  114. {
  115. done = true;
  116. }
  117. static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
  118. u64 duration, u64 tstamp, FILE *fp)
  119. {
  120. size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
  121. printed += fprintf_duration(duration, fp);
  122. if (trace->multiple_threads)
  123. printed += fprintf(fp, "%d ", thread->tid);
  124. return printed;
  125. }
  126. static int trace__process_event(struct machine *machine, union perf_event *event)
  127. {
  128. int ret = 0;
  129. switch (event->header.type) {
  130. case PERF_RECORD_LOST:
  131. color_fprintf(stdout, PERF_COLOR_RED,
  132. "LOST %" PRIu64 " events!\n", event->lost.lost);
  133. ret = machine__process_lost_event(machine, event);
  134. default:
  135. ret = machine__process_event(machine, event);
  136. break;
  137. }
  138. return ret;
  139. }
/*
 * perf_tool callback adapter used while synthesizing pre-existing threads:
 * forwards each synthesized event to trace__process_event().
 */
static int trace__tool_process(struct perf_tool *tool __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
{
    return trace__process_event(machine, event);
}
/*
 * Initialize the symbol machinery and the host "machine", then synthesize
 * records for already-running threads: just the targeted tasks when a
 * pid/tid was given, otherwise every thread on the system.
 * Returns 0 on success; on failure the symbol state is torn down again.
 */
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
    int err = symbol__init();

    if (err)
        return err;

    machine__init(&trace->host, "", HOST_KERNEL_ID);
    machine__create_kernel_maps(&trace->host);

    if (perf_target__has_task(&trace->opts.target)) {
        err = perf_event__synthesize_thread_map(NULL, evlist->threads,
                                                trace__tool_process,
                                                &trace->host);
    } else {
        err = perf_event__synthesize_threads(NULL, trace__tool_process,
                                             &trace->host);
    }

    if (err)
        symbol__exit(); /* undo symbol__init() on failure */

    return err;
}
/*
 * Lazily populate trace->syscalls.table[id]: resolve the syscall name via
 * libaudit, attach formatting hints, and read the syscalls:sys_enter_<name>
 * tracepoint format, falling back to the alias (e.g. "newstat" for "stat").
 * Grows the table as needed.  Returns 0 on success, -1 on failure.
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
    char tp_name[128];
    struct syscall *sc;
    const char *name = audit_syscall_to_name(id, trace->audit_machine);

    if (name == NULL)
        return -1;

    if (id > trace->syscalls.max) {
        /* grow to id + 1 entries */
        struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

        if (nsyscalls == NULL)
            return -1;

        if (trace->syscalls.max != -1) {
            /* zero only the freshly added tail: entries max+1 .. id */
            memset(nsyscalls + trace->syscalls.max + 1, 0,
                   (id - trace->syscalls.max) * sizeof(*sc));
        } else {
            /* first allocation: zero the whole table */
            memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
        }

        trace->syscalls.table = nsyscalls;
        trace->syscalls.max = id;
    }

    sc = trace->syscalls.table + id;
    sc->name = name;
    sc->fmt = syscall_fmt__find(sc->name);

    snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
    sc->tp_format = event_format__new("syscalls", tp_name);

    /* some syscalls are wired up under an alias tracepoint name */
    if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
        snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
        sc->tp_format = event_format__new("syscalls", tp_name);
    }

    return sc->tp_format != NULL ? 0 : -1;
}
  197. static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
  198. unsigned long *args)
  199. {
  200. int i = 0;
  201. size_t printed = 0;
  202. if (sc->tp_format != NULL) {
  203. struct format_field *field;
  204. for (field = sc->tp_format->format.fields->next; field; field = field->next) {
  205. printed += scnprintf(bf + printed, size - printed,
  206. "%s%s: %ld", printed ? ", " : "",
  207. field->name, args[i++]);
  208. }
  209. } else {
  210. while (i < 6) {
  211. printed += scnprintf(bf + printed, size - printed,
  212. "%sarg%d: %ld",
  213. printed ? ", " : "", i, args[i]);
  214. ++i;
  215. }
  216. }
  217. return printed;
  218. }
  219. typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
  220. struct perf_sample *sample);
  221. static struct syscall *trace__syscall_info(struct trace *trace,
  222. struct perf_evsel *evsel,
  223. struct perf_sample *sample)
  224. {
  225. int id = perf_evsel__intval(evsel, sample, "id");
  226. if (id < 0) {
  227. printf("Invalid syscall %d id, skipping...\n", id);
  228. return NULL;
  229. }
  230. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
  231. trace__read_syscall_info(trace, id))
  232. goto out_cant_read;
  233. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
  234. goto out_cant_read;
  235. return &trace->syscalls.table[id];
  236. out_cant_read:
  237. printf("Problems reading syscall %d", id);
  238. if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
  239. printf("(%s)", trace->syscalls.table[id].name);
  240. puts(" information");
  241. return NULL;
  242. }
/*
 * raw_syscalls:sys_enter handler: format "name(arg: val, ...)" into the
 * per-thread entry_str buffer.  exit/exit_group never return, so those are
 * printed immediately; everything else is deferred to trace__sys_exit()
 * via entry_pending so entry and return value share one line.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
                            struct perf_sample *sample)
{
    char *msg;
    void *args;
    size_t printed = 0;
    struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
    struct syscall *sc = trace__syscall_info(trace, evsel, sample);
    struct thread_trace *ttrace = thread__trace(thread);

    if (ttrace == NULL || sc == NULL)
        return -1;

    args = perf_evsel__rawptr(evsel, sample, "args");
    if (args == NULL) {
        printf("Problems reading syscall arguments\n");
        return -1;
    }

    ttrace = thread->priv; /* same object thread__trace() returned above */

    /* lazily allocate the fixed 1024-byte per-thread format buffer */
    if (ttrace->entry_str == NULL) {
        ttrace->entry_str = malloc(1024);
        if (!ttrace->entry_str)
            return -1;
    }

    ttrace->entry_time = sample->time;
    msg = ttrace->entry_str;
    printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
    printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

    if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
        /* no sys_exit will come for these; print now, honouring --duration */
        if (!trace->duration_filter) {
            trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
            printf("%-70s\n", ttrace->entry_str);
        }
    } else
        ttrace->entry_pending = true;

    return 0;
}
/*
 * raw_syscalls:sys_exit handler: compute the syscall duration, apply the
 * --duration filter, print the pending entry (or a "continued" marker when
 * the entry wasn't captured), then decorate the return value — errno
 * name/message for failing calls flagged errmsg, "Timeout" for zero
 * returns flagged timeout.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
                           struct perf_sample *sample)
{
    int ret;
    u64 duration = 0;
    struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
    struct thread_trace *ttrace = thread__trace(thread);
    struct syscall *sc = trace__syscall_info(trace, evsel, sample);

    if (ttrace == NULL || sc == NULL)
        return -1;

    ret = perf_evsel__intval(evsel, sample, "ret");
    ttrace = thread->priv; /* same object thread__trace() returned above */
    ttrace->exit_time = sample->time;

    if (ttrace->entry_time) {
        duration = sample->time - ttrace->entry_time;
        if (trace__filter_duration(trace, duration))
            goto out;
    } else if (trace->duration_filter)
        goto out; /* no entry timestamp: can't honour the filter, drop it */

    trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);

    if (ttrace->entry_pending) {
        printf("%-70s", ttrace->entry_str);
    } else {
        /* the matching sys_enter wasn't captured (e.g. started before us) */
        printf(" ... [");
        color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
        printf("]: %s()", sc->name);
    }

    if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
        char bf[256];
        /* NOTE(review): relies on the GNU strerror_r() returning char * —
         * the POSIX variant returns int; confirm build flags. */
        const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
        *e = audit_errno_to_name(-ret);
        printf(") = -1 %s %s", e, emsg);
    } else if (ret == 0 && sc->fmt && sc->fmt->timeout)
        printf(") = 0 Timeout");
    else
        printf(") = %d", ret);

    putchar('\n');
out:
    ttrace->entry_pending = false;
    return 0;
}
/*
 * sched:sched_stat_runtime handler (--sched): accumulate per-thread and
 * global on-CPU time in milliseconds for the end-of-run summary.
 */
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
                                     struct perf_sample *sample)
{
    u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
    double runtime_ms = (double)runtime / NSEC_PER_MSEC;
    struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
    struct thread_trace *ttrace = thread__trace(thread);

    if (ttrace == NULL)
        goto out_dump;

    ttrace->runtime_ms += runtime_ms;
    trace->runtime_ms += runtime_ms;
    return 0;

out_dump:
    /* no per-thread state (allocation failed): dump the raw fields instead */
    printf("%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
    evsel->name,
    perf_evsel__strval(evsel, sample, "comm"),
    (pid_t)perf_evsel__intval(evsel, sample, "pid"),
    runtime,
    perf_evsel__intval(evsel, sample, "vruntime"));
    return 0;
}
/*
 * Main tracing loop: set up the evlist (raw_syscalls sys_enter/sys_exit,
 * plus sched_stat_runtime with --sched), optionally fork the workload,
 * then read mmap'ed events and dispatch PERF_RECORD_SAMPLEs to the
 * per-tracepoint handlers until a signal sets `done` and the buffers
 * drain.  Cleanup is a goto ladder unwinding in acquisition order.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
    struct perf_evlist *evlist = perf_evlist__new();
    struct perf_evsel *evsel;
    int err = -1, i;
    unsigned long before;
    const bool forks = argc > 0; /* remaining argv is the workload to run */

    if (evlist == NULL) {
        printf("Not enough memory to run!\n");
        goto out;
    }

    if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
        perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
        printf("Couldn't read the raw_syscalls tracepoints information!\n");
        goto out_delete_evlist;
    }

    if (trace->sched &&
        perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
                               trace__sched_stat_runtime)) {
        printf("Couldn't read the sched_stat_runtime tracepoint information!\n");
        goto out_delete_evlist;
    }

    err = perf_evlist__create_maps(evlist, &trace->opts.target);
    if (err < 0) {
        printf("Problems parsing the target to trace, check your options!\n");
        goto out_delete_evlist;
    }

    err = trace__symbols_init(trace, evlist);
    if (err < 0) {
        printf("Problems initializing symbol libraries!\n");
        goto out_delete_maps;
    }

    perf_evlist__config(evlist, &trace->opts);

    /* SIGCHLD also ends the run when the forked workload exits */
    signal(SIGCHLD, sig_handler);
    signal(SIGINT, sig_handler);

    if (forks) {
        err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
                                            argv, false, false);
        if (err < 0) {
            printf("Couldn't run the workload!\n");
            goto out_delete_maps;
        }
    }

    err = perf_evlist__open(evlist);
    if (err < 0) {
        printf("Couldn't create the events: %s\n", strerror(errno));
        goto out_delete_maps;
    }

    err = perf_evlist__mmap(evlist, UINT_MAX, false);
    if (err < 0) {
        printf("Couldn't mmap the events: %s\n", strerror(errno));
        goto out_close_evlist;
    }

    perf_evlist__enable(evlist);

    if (forks)
        perf_evlist__start_workload(evlist);

    /* map[0] == -1 means "all threads", so the tid column is needed too */
    trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
    before = trace->nr_events;

    for (i = 0; i < evlist->nr_mmaps; i++) {
        union perf_event *event;

        while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
            const u32 type = event->header.type;
            tracepoint_handler handler;
            struct perf_sample sample;

            ++trace->nr_events;

            err = perf_evlist__parse_sample(evlist, event, &sample);
            if (err) {
                printf("Can't parse sample, err = %d, skipping...\n", err);
                continue;
            }

            /* the first sample seen anchors all relative timestamps */
            if (trace->base_time == 0)
                trace->base_time = sample.time;

            if (type != PERF_RECORD_SAMPLE) {
                trace__process_event(&trace->host, event);
                continue;
            }

            evsel = perf_evlist__id2evsel(evlist, sample.id);
            if (evsel == NULL) {
                printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
                continue;
            }

            if (sample.raw_data == NULL) {
                printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
                       perf_evsel__name(evsel), sample.tid,
                       sample.cpu, sample.raw_size);
                continue;
            }

            handler = evsel->handler.func;
            handler(trace, evsel, &sample);
        }
    }

    /* nothing new this pass: either finish, or block until events arrive */
    if (trace->nr_events == before) {
        if (done)
            goto out_unmap_evlist;

        poll(evlist->pollfd, evlist->nr_fds, -1);
    }

    /* after a signal, stop producing events but keep draining the buffers */
    if (done)
        perf_evlist__disable(evlist);

    goto again;

out_unmap_evlist:
    perf_evlist__munmap(evlist);
out_close_evlist:
    perf_evlist__close(evlist);
out_delete_maps:
    perf_evlist__delete_maps(evlist);
out_delete_evlist:
    perf_evlist__delete(evlist);
out:
    return err;
}
  451. static size_t trace__fprintf_threads_header(FILE *fp)
  452. {
  453. size_t printed;
  454. printed = fprintf(fp, "\n _____________________________________________________________________\n");
  455. printed += fprintf(fp," __) Summary of events (__\n\n");
  456. printed += fprintf(fp," [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
  457. printed += fprintf(fp," _____________________________________________________________________\n\n");
  458. return printed;
  459. }
  460. static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
  461. {
  462. size_t printed = trace__fprintf_threads_header(fp);
  463. struct rb_node *nd;
  464. for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
  465. struct thread *thread = rb_entry(nd, struct thread, rb_node);
  466. struct thread_trace *ttrace = thread->priv;
  467. const char *color;
  468. double ratio;
  469. if (ttrace == NULL)
  470. continue;
  471. ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
  472. color = PERF_COLOR_NORMAL;
  473. if (ratio > 50.0)
  474. color = PERF_COLOR_RED;
  475. else if (ratio > 25.0)
  476. color = PERF_COLOR_GREEN;
  477. else if (ratio > 5.0)
  478. color = PERF_COLOR_YELLOW;
  479. printed += color_fprintf(fp, color, "%20s", thread->comm);
  480. printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
  481. printed += color_fprintf(fp, color, "%5.1f%%", ratio);
  482. printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
  483. }
  484. return printed;
  485. }
  486. static int trace__set_duration(const struct option *opt, const char *str,
  487. int unset __maybe_unused)
  488. {
  489. struct trace *trace = opt->value;
  490. trace->duration_filter = atof(str);
  491. return 0;
  492. }
/*
 * `perf trace` entry point: parse options, validate and resolve the target,
 * default to system-wide tracing when neither a workload nor a target was
 * given, run the trace loop and optionally print the --sched summary.
 */
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
    const char * const trace_usage[] = {
        "perf trace [<options>] [<command>]",
        "perf trace [<options>] -- <command> [<options>]",
        NULL
    };
    struct trace trace = {
        .audit_machine = audit_detect_machine(),
        .syscalls = {
            . max = -1, /* syscall table starts empty */
        },
        .opts = {
            .target = {
                .uid = UINT_MAX,        /* no uid filter by default */
                .uses_mmap = true,
            },
            .user_freq = UINT_MAX,
            .user_interval = ULLONG_MAX,
            .no_delay = true,
            .mmap_pages = 1024,
        },
    };
    const struct option trace_options[] = {
    OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
    "trace events on existing process id"),
    OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
    "trace events on existing thread id"),
    OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
    "system-wide collection from all CPUs"),
    OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
    "list of cpus to monitor"),
    OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
    "child tasks do not inherit counters"),
    OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
    "number of mmap data pages"),
    OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
    "user to profile"),
    OPT_CALLBACK(0, "duration", &trace, "float",
    "show only events with duration > N.M ms",
    trace__set_duration),
    OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
    OPT_END()
    };
    int err;
    char bf[BUFSIZ];

    argc = parse_options(argc, argv, trace_options, trace_usage, 0);

    err = perf_target__validate(&trace.opts.target);
    if (err) {
        perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
        printf("%s", bf);
        return err;
    }

    err = perf_target__parse_uid(&trace.opts.target);
    if (err) {
        perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
        printf("%s", bf);
        return err;
    }

    /* no workload and no pid/tid/cpu/uid target: trace the whole system */
    if (!argc && perf_target__none(&trace.opts.target))
        trace.opts.target.system_wide = true;

    err = trace__run(&trace, argc, argv);

    if (trace.sched && !err)
        trace__fprintf_thread_summary(&trace, stdout);

    return err;
}