builtin-trace.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726
#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/thread_map.h"
#include <libaudit.h>
#include <signal.h>
#include <stdlib.h>
  12. static struct syscall_fmt {
  13. const char *name;
  14. const char *alias;
  15. bool errmsg;
  16. bool timeout;
  17. } syscall_fmts[] = {
  18. { .name = "access", .errmsg = true, },
  19. { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
  20. { .name = "connect", .errmsg = true, },
  21. { .name = "fstat", .errmsg = true, .alias = "newfstat", },
  22. { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
  23. { .name = "futex", .errmsg = true, },
  24. { .name = "open", .errmsg = true, },
  25. { .name = "poll", .errmsg = true, .timeout = true, },
  26. { .name = "ppoll", .errmsg = true, .timeout = true, },
  27. { .name = "read", .errmsg = true, },
  28. { .name = "recvfrom", .errmsg = true, },
  29. { .name = "select", .errmsg = true, .timeout = true, },
  30. { .name = "socket", .errmsg = true, },
  31. { .name = "stat", .errmsg = true, .alias = "newstat", },
  32. };
  33. static int syscall_fmt__cmp(const void *name, const void *fmtp)
  34. {
  35. const struct syscall_fmt *fmt = fmtp;
  36. return strcmp(name, fmt->name);
  37. }
  38. static struct syscall_fmt *syscall_fmt__find(const char *name)
  39. {
  40. const int nmemb = ARRAY_SIZE(syscall_fmts);
  41. return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
  42. }
  43. struct syscall {
  44. struct event_format *tp_format;
  45. const char *name;
  46. bool filtered;
  47. struct syscall_fmt *fmt;
  48. };
  49. static size_t fprintf_duration(unsigned long t, FILE *fp)
  50. {
  51. double duration = (double)t / NSEC_PER_MSEC;
  52. size_t printed = fprintf(fp, "(");
  53. if (duration >= 1.0)
  54. printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
  55. else if (duration >= 0.01)
  56. printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
  57. else
  58. printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
  59. return printed + fprintf(stdout, "): ");
  60. }
  61. struct thread_trace {
  62. u64 entry_time;
  63. u64 exit_time;
  64. bool entry_pending;
  65. unsigned long nr_events;
  66. char *entry_str;
  67. double runtime_ms;
  68. };
  69. static struct thread_trace *thread_trace__new(void)
  70. {
  71. return zalloc(sizeof(struct thread_trace));
  72. }
  73. static struct thread_trace *thread__trace(struct thread *thread)
  74. {
  75. struct thread_trace *ttrace;
  76. if (thread == NULL)
  77. goto fail;
  78. if (thread->priv == NULL)
  79. thread->priv = thread_trace__new();
  80. if (thread->priv == NULL)
  81. goto fail;
  82. ttrace = thread->priv;
  83. ++ttrace->nr_events;
  84. return ttrace;
  85. fail:
  86. color_fprintf(stdout, PERF_COLOR_RED,
  87. "WARNING: not enough memory, dropping samples!\n");
  88. return NULL;
  89. }
  90. struct trace {
  91. int audit_machine;
  92. struct {
  93. int max;
  94. struct syscall *table;
  95. } syscalls;
  96. struct perf_record_opts opts;
  97. struct machine host;
  98. u64 base_time;
  99. struct strlist *ev_qualifier;
  100. unsigned long nr_events;
  101. bool sched;
  102. bool multiple_threads;
  103. double duration_filter;
  104. double runtime_ms;
  105. };
  106. static bool trace__filter_duration(struct trace *trace, double t)
  107. {
  108. return t < (trace->duration_filter * NSEC_PER_MSEC);
  109. }
  110. static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
  111. {
  112. double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
  113. return fprintf(fp, "%10.3f ", ts);
  114. }
  115. static bool done = false;
  116. static void sig_handler(int sig __maybe_unused)
  117. {
  118. done = true;
  119. }
  120. static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
  121. u64 duration, u64 tstamp, FILE *fp)
  122. {
  123. size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
  124. printed += fprintf_duration(duration, fp);
  125. if (trace->multiple_threads)
  126. printed += fprintf(fp, "%d ", thread->tid);
  127. return printed;
  128. }
  129. static int trace__process_event(struct machine *machine, union perf_event *event)
  130. {
  131. int ret = 0;
  132. switch (event->header.type) {
  133. case PERF_RECORD_LOST:
  134. color_fprintf(stdout, PERF_COLOR_RED,
  135. "LOST %" PRIu64 " events!\n", event->lost.lost);
  136. ret = machine__process_lost_event(machine, event);
  137. default:
  138. ret = machine__process_event(machine, event);
  139. break;
  140. }
  141. return ret;
  142. }
  143. static int trace__tool_process(struct perf_tool *tool __maybe_unused,
  144. union perf_event *event,
  145. struct perf_sample *sample __maybe_unused,
  146. struct machine *machine)
  147. {
  148. return trace__process_event(machine, event);
  149. }
  150. static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
  151. {
  152. int err = symbol__init();
  153. if (err)
  154. return err;
  155. machine__init(&trace->host, "", HOST_KERNEL_ID);
  156. machine__create_kernel_maps(&trace->host);
  157. if (perf_target__has_task(&trace->opts.target)) {
  158. err = perf_event__synthesize_thread_map(NULL, evlist->threads,
  159. trace__tool_process,
  160. &trace->host);
  161. } else {
  162. err = perf_event__synthesize_threads(NULL, trace__tool_process,
  163. &trace->host);
  164. }
  165. if (err)
  166. symbol__exit();
  167. return err;
  168. }
  169. static int trace__read_syscall_info(struct trace *trace, int id)
  170. {
  171. char tp_name[128];
  172. struct syscall *sc;
  173. const char *name = audit_syscall_to_name(id, trace->audit_machine);
  174. if (name == NULL)
  175. return -1;
  176. if (id > trace->syscalls.max) {
  177. struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
  178. if (nsyscalls == NULL)
  179. return -1;
  180. if (trace->syscalls.max != -1) {
  181. memset(nsyscalls + trace->syscalls.max + 1, 0,
  182. (id - trace->syscalls.max) * sizeof(*sc));
  183. } else {
  184. memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
  185. }
  186. trace->syscalls.table = nsyscalls;
  187. trace->syscalls.max = id;
  188. }
  189. sc = trace->syscalls.table + id;
  190. sc->name = name;
  191. if (trace->ev_qualifier && !strlist__find(trace->ev_qualifier, name)) {
  192. sc->filtered = true;
  193. /*
  194. * No need to do read tracepoint information since this will be
  195. * filtered out.
  196. */
  197. return 0;
  198. }
  199. sc->fmt = syscall_fmt__find(sc->name);
  200. snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
  201. sc->tp_format = event_format__new("syscalls", tp_name);
  202. if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
  203. snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
  204. sc->tp_format = event_format__new("syscalls", tp_name);
  205. }
  206. return sc->tp_format != NULL ? 0 : -1;
  207. }
  208. static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
  209. unsigned long *args)
  210. {
  211. int i = 0;
  212. size_t printed = 0;
  213. if (sc->tp_format != NULL) {
  214. struct format_field *field;
  215. for (field = sc->tp_format->format.fields->next; field; field = field->next) {
  216. printed += scnprintf(bf + printed, size - printed,
  217. "%s%s: %ld", printed ? ", " : "",
  218. field->name, args[i++]);
  219. }
  220. } else {
  221. while (i < 6) {
  222. printed += scnprintf(bf + printed, size - printed,
  223. "%sarg%d: %ld",
  224. printed ? ", " : "", i, args[i]);
  225. ++i;
  226. }
  227. }
  228. return printed;
  229. }
  230. typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
  231. struct perf_sample *sample);
  232. static struct syscall *trace__syscall_info(struct trace *trace,
  233. struct perf_evsel *evsel,
  234. struct perf_sample *sample)
  235. {
  236. int id = perf_evsel__intval(evsel, sample, "id");
  237. if (id < 0) {
  238. printf("Invalid syscall %d id, skipping...\n", id);
  239. return NULL;
  240. }
  241. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
  242. trace__read_syscall_info(trace, id))
  243. goto out_cant_read;
  244. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
  245. goto out_cant_read;
  246. return &trace->syscalls.table[id];
  247. out_cant_read:
  248. printf("Problems reading syscall %d", id);
  249. if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
  250. printf("(%s)", trace->syscalls.table[id].name);
  251. puts(" information");
  252. return NULL;
  253. }
  254. static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
  255. struct perf_sample *sample)
  256. {
  257. char *msg;
  258. void *args;
  259. size_t printed = 0;
  260. struct thread *thread;
  261. struct syscall *sc = trace__syscall_info(trace, evsel, sample);
  262. struct thread_trace *ttrace;
  263. if (sc == NULL)
  264. return -1;
  265. if (sc->filtered)
  266. return 0;
  267. thread = machine__findnew_thread(&trace->host, sample->tid);
  268. ttrace = thread__trace(thread);
  269. if (ttrace == NULL)
  270. return -1;
  271. args = perf_evsel__rawptr(evsel, sample, "args");
  272. if (args == NULL) {
  273. printf("Problems reading syscall arguments\n");
  274. return -1;
  275. }
  276. ttrace = thread->priv;
  277. if (ttrace->entry_str == NULL) {
  278. ttrace->entry_str = malloc(1024);
  279. if (!ttrace->entry_str)
  280. return -1;
  281. }
  282. ttrace->entry_time = sample->time;
  283. msg = ttrace->entry_str;
  284. printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
  285. printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);
  286. if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
  287. if (!trace->duration_filter) {
  288. trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
  289. printf("%-70s\n", ttrace->entry_str);
  290. }
  291. } else
  292. ttrace->entry_pending = true;
  293. return 0;
  294. }
  295. static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
  296. struct perf_sample *sample)
  297. {
  298. int ret;
  299. u64 duration = 0;
  300. struct thread *thread;
  301. struct syscall *sc = trace__syscall_info(trace, evsel, sample);
  302. struct thread_trace *ttrace;
  303. if (sc == NULL)
  304. return -1;
  305. if (sc->filtered)
  306. return 0;
  307. thread = machine__findnew_thread(&trace->host, sample->tid);
  308. ttrace = thread__trace(thread);
  309. if (ttrace == NULL)
  310. return -1;
  311. ret = perf_evsel__intval(evsel, sample, "ret");
  312. ttrace = thread->priv;
  313. ttrace->exit_time = sample->time;
  314. if (ttrace->entry_time) {
  315. duration = sample->time - ttrace->entry_time;
  316. if (trace__filter_duration(trace, duration))
  317. goto out;
  318. } else if (trace->duration_filter)
  319. goto out;
  320. trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);
  321. if (ttrace->entry_pending) {
  322. printf("%-70s", ttrace->entry_str);
  323. } else {
  324. printf(" ... [");
  325. color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
  326. printf("]: %s()", sc->name);
  327. }
  328. if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
  329. char bf[256];
  330. const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
  331. *e = audit_errno_to_name(-ret);
  332. printf(") = -1 %s %s", e, emsg);
  333. } else if (ret == 0 && sc->fmt && sc->fmt->timeout)
  334. printf(") = 0 Timeout");
  335. else
  336. printf(") = %d", ret);
  337. putchar('\n');
  338. out:
  339. ttrace->entry_pending = false;
  340. return 0;
  341. }
  342. static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
  343. struct perf_sample *sample)
  344. {
  345. u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
  346. double runtime_ms = (double)runtime / NSEC_PER_MSEC;
  347. struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
  348. struct thread_trace *ttrace = thread__trace(thread);
  349. if (ttrace == NULL)
  350. goto out_dump;
  351. ttrace->runtime_ms += runtime_ms;
  352. trace->runtime_ms += runtime_ms;
  353. return 0;
  354. out_dump:
  355. printf("%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
  356. evsel->name,
  357. perf_evsel__strval(evsel, sample, "comm"),
  358. (pid_t)perf_evsel__intval(evsel, sample, "pid"),
  359. runtime,
  360. perf_evsel__intval(evsel, sample, "vruntime"));
  361. return 0;
  362. }
  363. static int trace__run(struct trace *trace, int argc, const char **argv)
  364. {
  365. struct perf_evlist *evlist = perf_evlist__new();
  366. struct perf_evsel *evsel;
  367. int err = -1, i;
  368. unsigned long before;
  369. const bool forks = argc > 0;
  370. if (evlist == NULL) {
  371. printf("Not enough memory to run!\n");
  372. goto out;
  373. }
  374. if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
  375. perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
  376. printf("Couldn't read the raw_syscalls tracepoints information!\n");
  377. goto out_delete_evlist;
  378. }
  379. if (trace->sched &&
  380. perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
  381. trace__sched_stat_runtime)) {
  382. printf("Couldn't read the sched_stat_runtime tracepoint information!\n");
  383. goto out_delete_evlist;
  384. }
  385. err = perf_evlist__create_maps(evlist, &trace->opts.target);
  386. if (err < 0) {
  387. printf("Problems parsing the target to trace, check your options!\n");
  388. goto out_delete_evlist;
  389. }
  390. err = trace__symbols_init(trace, evlist);
  391. if (err < 0) {
  392. printf("Problems initializing symbol libraries!\n");
  393. goto out_delete_maps;
  394. }
  395. perf_evlist__config(evlist, &trace->opts);
  396. signal(SIGCHLD, sig_handler);
  397. signal(SIGINT, sig_handler);
  398. if (forks) {
  399. err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
  400. argv, false, false);
  401. if (err < 0) {
  402. printf("Couldn't run the workload!\n");
  403. goto out_delete_maps;
  404. }
  405. }
  406. err = perf_evlist__open(evlist);
  407. if (err < 0) {
  408. printf("Couldn't create the events: %s\n", strerror(errno));
  409. goto out_delete_maps;
  410. }
  411. err = perf_evlist__mmap(evlist, UINT_MAX, false);
  412. if (err < 0) {
  413. printf("Couldn't mmap the events: %s\n", strerror(errno));
  414. goto out_close_evlist;
  415. }
  416. perf_evlist__enable(evlist);
  417. if (forks)
  418. perf_evlist__start_workload(evlist);
  419. trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
  420. again:
  421. before = trace->nr_events;
  422. for (i = 0; i < evlist->nr_mmaps; i++) {
  423. union perf_event *event;
  424. while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
  425. const u32 type = event->header.type;
  426. tracepoint_handler handler;
  427. struct perf_sample sample;
  428. ++trace->nr_events;
  429. err = perf_evlist__parse_sample(evlist, event, &sample);
  430. if (err) {
  431. printf("Can't parse sample, err = %d, skipping...\n", err);
  432. continue;
  433. }
  434. if (trace->base_time == 0)
  435. trace->base_time = sample.time;
  436. if (type != PERF_RECORD_SAMPLE) {
  437. trace__process_event(&trace->host, event);
  438. continue;
  439. }
  440. evsel = perf_evlist__id2evsel(evlist, sample.id);
  441. if (evsel == NULL) {
  442. printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
  443. continue;
  444. }
  445. if (sample.raw_data == NULL) {
  446. printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
  447. perf_evsel__name(evsel), sample.tid,
  448. sample.cpu, sample.raw_size);
  449. continue;
  450. }
  451. handler = evsel->handler.func;
  452. handler(trace, evsel, &sample);
  453. }
  454. }
  455. if (trace->nr_events == before) {
  456. if (done)
  457. goto out_unmap_evlist;
  458. poll(evlist->pollfd, evlist->nr_fds, -1);
  459. }
  460. if (done)
  461. perf_evlist__disable(evlist);
  462. goto again;
  463. out_unmap_evlist:
  464. perf_evlist__munmap(evlist);
  465. out_close_evlist:
  466. perf_evlist__close(evlist);
  467. out_delete_maps:
  468. perf_evlist__delete_maps(evlist);
  469. out_delete_evlist:
  470. perf_evlist__delete(evlist);
  471. out:
  472. return err;
  473. }
  474. static size_t trace__fprintf_threads_header(FILE *fp)
  475. {
  476. size_t printed;
  477. printed = fprintf(fp, "\n _____________________________________________________________________\n");
  478. printed += fprintf(fp," __) Summary of events (__\n\n");
  479. printed += fprintf(fp," [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
  480. printed += fprintf(fp," _____________________________________________________________________\n\n");
  481. return printed;
  482. }
  483. static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
  484. {
  485. size_t printed = trace__fprintf_threads_header(fp);
  486. struct rb_node *nd;
  487. for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
  488. struct thread *thread = rb_entry(nd, struct thread, rb_node);
  489. struct thread_trace *ttrace = thread->priv;
  490. const char *color;
  491. double ratio;
  492. if (ttrace == NULL)
  493. continue;
  494. ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
  495. color = PERF_COLOR_NORMAL;
  496. if (ratio > 50.0)
  497. color = PERF_COLOR_RED;
  498. else if (ratio > 25.0)
  499. color = PERF_COLOR_GREEN;
  500. else if (ratio > 5.0)
  501. color = PERF_COLOR_YELLOW;
  502. printed += color_fprintf(fp, color, "%20s", thread->comm);
  503. printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
  504. printed += color_fprintf(fp, color, "%5.1f%%", ratio);
  505. printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
  506. }
  507. return printed;
  508. }
  509. static int trace__set_duration(const struct option *opt, const char *str,
  510. int unset __maybe_unused)
  511. {
  512. struct trace *trace = opt->value;
  513. trace->duration_filter = atof(str);
  514. return 0;
  515. }
  516. int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
  517. {
  518. const char * const trace_usage[] = {
  519. "perf trace [<options>] [<command>]",
  520. "perf trace [<options>] -- <command> [<options>]",
  521. NULL
  522. };
  523. struct trace trace = {
  524. .audit_machine = audit_detect_machine(),
  525. .syscalls = {
  526. . max = -1,
  527. },
  528. .opts = {
  529. .target = {
  530. .uid = UINT_MAX,
  531. .uses_mmap = true,
  532. },
  533. .user_freq = UINT_MAX,
  534. .user_interval = ULLONG_MAX,
  535. .no_delay = true,
  536. .mmap_pages = 1024,
  537. },
  538. };
  539. const char *ev_qualifier_str = NULL;
  540. const struct option trace_options[] = {
  541. OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
  542. "list of events to trace"),
  543. OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
  544. "trace events on existing process id"),
  545. OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
  546. "trace events on existing thread id"),
  547. OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
  548. "system-wide collection from all CPUs"),
  549. OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
  550. "list of cpus to monitor"),
  551. OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
  552. "child tasks do not inherit counters"),
  553. OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
  554. "number of mmap data pages"),
  555. OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
  556. "user to profile"),
  557. OPT_CALLBACK(0, "duration", &trace, "float",
  558. "show only events with duration > N.M ms",
  559. trace__set_duration),
  560. OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
  561. OPT_END()
  562. };
  563. int err;
  564. char bf[BUFSIZ];
  565. argc = parse_options(argc, argv, trace_options, trace_usage, 0);
  566. if (ev_qualifier_str != NULL) {
  567. trace.ev_qualifier = strlist__new(true, ev_qualifier_str);
  568. if (trace.ev_qualifier == NULL) {
  569. puts("Not enough memory to parse event qualifier");
  570. return -ENOMEM;
  571. }
  572. }
  573. err = perf_target__validate(&trace.opts.target);
  574. if (err) {
  575. perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
  576. printf("%s", bf);
  577. return err;
  578. }
  579. err = perf_target__parse_uid(&trace.opts.target);
  580. if (err) {
  581. perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
  582. printf("%s", bf);
  583. return err;
  584. }
  585. if (!argc && perf_target__none(&trace.opts.target))
  586. trace.opts.target.system_wide = true;
  587. err = trace__run(&trace, argc, argv);
  588. if (trace.sched && !err)
  589. trace__fprintf_thread_summary(&trace, stdout);
  590. return err;
  591. }