builtin-trace.c
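/*
 * builtin-trace.c -- 'perf trace': a strace-like system call tracer built on
 * the perf_events infrastructure and the raw_syscalls:* tracepoints.
 */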

#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/thread_map.h"
#include "event-parse.h"

#include <libaudit.h>
#include <stdlib.h>

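/*
 * Per-syscall output tweaks: whether a negative return should be decoded as an
 * errno, whether a zero return means a timeout, and an alternative tracepoint
 * name (alias) to try.  Keep the table sorted by name: syscall_fmt__find()
 * looks entries up with bsearch().
 */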
static struct syscall_fmt {
	const char *name;
	const char *alias;
	bool errmsg;
	bool timeout;
} syscall_fmts[] = {
	{ .name = "access",     .errmsg = true, },
	{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name = "fstat",      .errmsg = true, .alias = "newfstat", },
	{ .name = "fstatat",    .errmsg = true, .alias = "newfstatat", },
	{ .name = "futex",      .errmsg = true, },
	{ .name = "open",       .errmsg = true, },
	{ .name = "poll",       .errmsg = true, .timeout = true, },
	{ .name = "ppoll",      .errmsg = true, .timeout = true, },
	{ .name = "read",       .errmsg = true, },
	{ .name = "recvfrom",   .errmsg = true, },
	{ .name = "select",     .errmsg = true, .timeout = true, },
	{ .name = "socket",     .errmsg = true, },
	{ .name = "stat",       .errmsg = true, .alias = "newstat", },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
	struct event_format *tp_format;
	const char *name;
	struct syscall_fmt *fmt;
};

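/* Print a duration in milliseconds: red at or above 1 ms, yellow at or above 0.01 ms. */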
static size_t fprintf_duration(unsigned long t, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);

	return printed + fprintf(fp, "): ");
}

struct thread_trace {
	u64 entry_time;
	u64 exit_time;
	bool entry_pending;
	unsigned long nr_events;
	char *entry_str;
	double runtime_ms;
};

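/* Per-thread trace state is allocated lazily and stashed in thread->priv. */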
static struct thread_trace *thread_trace__new(void)
{
	return zalloc(sizeof(struct thread_trace));
}

static struct thread_trace *thread__trace(struct thread *thread)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread->priv == NULL)
		thread->priv = thread_trace__new();

	if (thread->priv == NULL)
		goto fail;

	ttrace = thread->priv;
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(stdout, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

struct trace {
	int audit_machine;
	struct {
		int max;
		struct syscall *table;
	} syscalls;
	struct perf_record_opts opts;
	struct machine host;
	u64 base_time;
	unsigned long nr_events;
	bool sched;
	bool multiple_threads;
	double duration_filter;
	double runtime_ms;
};

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);

	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads)
		printed += fprintf(fp, "%d ", thread->pid);

	return printed;
}

static int trace__process_event(struct machine *machine, union perf_event *event)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(stdout, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event);
		break;
	default:
		ret = machine__process_event(machine, event);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return trace__process_event(machine, event);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init();

	if (err)
		return err;

	machine__init(&trace->host, "", HOST_KERNEL_ID);
	machine__create_kernel_maps(&trace->host);

	if (perf_target__has_task(&trace->opts.target)) {
		err = perf_event__synthesize_thread_map(NULL, evlist->threads,
							trace__tool_process,
							&trace->host);
	} else {
		err = perf_event__synthesize_threads(NULL, trace__tool_process,
						     &trace->host);
	}

	if (err)
		symbol__exit();

	return err;
}

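/*
 * Lazily grow the syscall table up to 'id', resolve the syscall name via
 * libaudit and bind the matching syscalls:sys_enter_* tracepoint format so
 * that the arguments can later be printed by field name.
 */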
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	return sc->tp_format != NULL ? 0 : -1;
}

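/* Format the syscall arguments: by tracepoint field name when the format is known, as generic argN otherwise. */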
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned long *args)
{
	int i = 0;
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;

		for (field = sc->tp_format->format.fields->next; field; field = field->next) {
			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: %ld", printed ? ", " : "",
					     field->name, args[i++]);
		}
	} else {
		while (i < 6) {
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);

static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		printf("Invalid syscall %d id, skipping...\n", id);
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	printf("Problems reading syscall %d", id);
	if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
		printf("(%s)", trace->syscalls.table[id].name);
	puts(" information");
	return NULL;
}

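/*
 * raw_syscalls:sys_enter handler: format the entry line into entry_str and
 * hold it until the matching sys_exit arrives, except for exit/exit_group,
 * which never return and are printed immediately.
 */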
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace = thread__trace(thread);

	if (ttrace == NULL || sc == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		printf("Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
			printf("%-70s\n", ttrace->entry_str);
		}
	} else
		ttrace->entry_pending = true;

	return 0;
}

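/*
 * raw_syscalls:sys_exit handler: compute the duration, apply the --duration
 * filter and print the pending entry together with the return value, decoded
 * as an errno name or "Timeout" when the syscall_fmt says so.
 */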
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct thread_trace *ttrace = thread__trace(thread);
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);

	if (ttrace == NULL || sc == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);

	if (ttrace->entry_pending) {
		printf("%-70s", ttrace->entry_str);
	} else {
		printf(" ... [");
		color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
		printf("]: %s()", sc->name);
	}

	if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		printf(") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt && sc->fmt->timeout)
		printf(") = 0 Timeout");
	else
		printf(") = %d", ret);

	putchar('\n');
out:
	ttrace->entry_pending = false;

	return 0;
}

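/* sched:sched_stat_runtime handler (--sched): accumulate per-thread and global runtime. */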
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
	struct thread_trace *ttrace = thread__trace(thread);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
	return 0;

out_dump:
	printf("%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
	       evsel->name,
	       perf_evsel__strval(evsel, sample, "comm"),
	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
	       runtime,
	       perf_evsel__intval(evsel, sample, "vruntime"));
	return 0;
}

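/*
 * Main loop: create the event list, open and mmap the events, optionally
 * start the workload, then consume samples from the ring buffers, dispatching
 * each tracepoint to its handler until the workload exits or SIGINT arrives.
 */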
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		printf("Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		printf("Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime)) {
		printf("Couldn't read the sched_stat_runtime tracepoint information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		printf("Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		printf("Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config_attrs(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts, argv);
		if (err < 0) {
			printf("Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		printf("Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		printf("Couldn't mmap the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				printf("Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(&trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
				       perf_evsel__name(evsel), sample.tid,
				       sample.cpu, sample.raw_size);
				continue;
			}

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);
		}
	}

	if (trace->nr_events == before) {
		if (done)
			goto out_delete_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}

static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed  = fprintf(fp, "\n _____________________________________________________________________\n");
	printed += fprintf(fp, " __) Summary of events (__\n\n");
	printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
	printed += fprintf(fp, " _____________________________________________________________________\n\n");

	return printed;
}

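/* One line per traced thread: event count, share of all events (color-coded) and runtime. */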
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;

	for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
		struct thread *thread = rb_entry(nd, struct thread, rb_node);
		struct thread_trace *ttrace = thread->priv;
		const char *color;
		double ratio;

		if (ttrace == NULL)
			continue;

		ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

		color = PERF_COLOR_NORMAL;
		if (ratio > 50.0)
			color = PERF_COLOR_RED;
		else if (ratio > 25.0)
			color = PERF_COLOR_GREEN;
		else if (ratio > 5.0)
			color = PERF_COLOR_YELLOW;

		printed += color_fprintf(fp, color, "%20s", thread->comm);
		printed += fprintf(fp, " - %-5d :%11lu [", thread->pid, ttrace->nr_events);
		printed += color_fprintf(fp, color, "%5.1f%%", ratio);
		printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
	}

	return printed;
}

static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}

int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			.max = -1,
		},
		.opts = {
			.target = {
				.uid = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay = true,
			.mmap_pages = 1024,
		},
	};
	const struct option trace_options[] = {
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_END()
	};
	int err;
	char bf[BUFSIZ];

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	err = perf_target__validate(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	err = trace__run(&trace, argc, argv);

	if (trace.sched && !err)
		trace__fprintf_thread_summary(&trace, stdout);

	return err;
}