builtin-trace.c

#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"

#include <libaudit.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <linux/futex.h>

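/*
 * Per-argument pretty-printers.  They share one signature: format the raw
 * syscall argument 'arg' into 'bf', and optionally set bits in 'arg_mask'
 * so that later arguments of the same syscall are suppressed when they are
 * meaningless for the decoded value.
 */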
static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
					 unsigned long arg,
					 u8 arg_idx __maybe_unused,
					 u8 *arg_mask __maybe_unused)
{
	return scnprintf(bf, size, "%#lx", arg);
}

#define SCA_HEX syscall_arg__scnprintf_hex

static size_t syscall_arg__scnprintf_whence(char *bf, size_t size,
					    unsigned long arg,
					    u8 arg_idx __maybe_unused,
					    u8 *arg_mask __maybe_unused)
{
	int whence = arg;

	switch (whence) {
#define P_WHENCE(n) case SEEK_##n: return scnprintf(bf, size, #n)
	P_WHENCE(SET);
	P_WHENCE(CUR);
	P_WHENCE(END);
#ifdef SEEK_DATA
	P_WHENCE(DATA);
#endif
#ifdef SEEK_HOLE
	P_WHENCE(HOLE);
#endif
#undef P_WHENCE
	default: break;
	}

	return scnprintf(bf, size, "%#x", whence);
}

#define SCA_WHENCE syscall_arg__scnprintf_whence

static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
					       unsigned long arg,
					       u8 arg_idx __maybe_unused,
					       u8 *arg_mask __maybe_unused)
{
	int printed = 0, prot = arg;

	if (prot == PROT_NONE)
		return scnprintf(bf, size, "NONE");
#define P_MMAP_PROT(n) \
	if (prot & PROT_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		prot &= ~PROT_##n; \
	}

	P_MMAP_PROT(EXEC);
	P_MMAP_PROT(READ);
	P_MMAP_PROT(WRITE);
#ifdef PROT_SEM
	P_MMAP_PROT(SEM);
#endif
	P_MMAP_PROT(GROWSDOWN);
	P_MMAP_PROT(GROWSUP);
#undef P_MMAP_PROT

	if (prot)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);

	return printed;
}

#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot

static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
						unsigned long arg, u8 arg_idx __maybe_unused,
						u8 *arg_mask __maybe_unused)
{
	int printed = 0, flags = arg;

#define P_MMAP_FLAG(n) \
	if (flags & MAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MAP_##n; \
	}

	P_MMAP_FLAG(SHARED);
	P_MMAP_FLAG(PRIVATE);
#ifdef MAP_32BIT
	P_MMAP_FLAG(32BIT);
#endif
	P_MMAP_FLAG(ANONYMOUS);
	P_MMAP_FLAG(DENYWRITE);
	P_MMAP_FLAG(EXECUTABLE);
	P_MMAP_FLAG(FILE);
	P_MMAP_FLAG(FIXED);
	P_MMAP_FLAG(GROWSDOWN);
#ifdef MAP_HUGETLB
	P_MMAP_FLAG(HUGETLB);
#endif
	P_MMAP_FLAG(LOCKED);
	P_MMAP_FLAG(NONBLOCK);
	P_MMAP_FLAG(NORESERVE);
	P_MMAP_FLAG(POPULATE);
	P_MMAP_FLAG(STACK);
#ifdef MAP_UNINITIALIZED
	P_MMAP_FLAG(UNINITIALIZED);
#endif
#undef P_MMAP_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags

static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
						      unsigned long arg, u8 arg_idx __maybe_unused,
						      u8 *arg_mask __maybe_unused)
{
	int behavior = arg;

	switch (behavior) {
#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
	P_MADV_BHV(NORMAL);
	P_MADV_BHV(RANDOM);
	P_MADV_BHV(SEQUENTIAL);
	P_MADV_BHV(WILLNEED);
	P_MADV_BHV(DONTNEED);
	P_MADV_BHV(REMOVE);
	P_MADV_BHV(DONTFORK);
	P_MADV_BHV(DOFORK);
	P_MADV_BHV(HWPOISON);
#ifdef MADV_SOFT_OFFLINE
	P_MADV_BHV(SOFT_OFFLINE);
#endif
	P_MADV_BHV(MERGEABLE);
	P_MADV_BHV(UNMERGEABLE);
#ifdef MADV_HUGEPAGE
	P_MADV_BHV(HUGEPAGE);
#endif
#ifdef MADV_NOHUGEPAGE
	P_MADV_BHV(NOHUGEPAGE);
#endif
#ifdef MADV_DONTDUMP
	P_MADV_BHV(DONTDUMP);
#endif
#ifdef MADV_DODUMP
	P_MADV_BHV(DODUMP);
#endif
#undef P_MADV_BHV
	default: break;
	}

	return scnprintf(bf, size, "%#x", behavior);
}

#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior

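/*
 * futex(2): decode the 'op' argument and use 'arg_mask' to hide the
 * arguments that the selected FUTEX_* command does not use.
 */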
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned long arg,
					      u8 arg_idx __maybe_unused, u8 *arg_mask)
{
	enum syscall_futex_args {
		SCF_UADDR   = (1 << 0),
		SCF_OP      = (1 << 1),
		SCF_VAL     = (1 << 2),
		SCF_TIMEOUT = (1 << 3),
		SCF_UADDR2  = (1 << 4),
		SCF_VAL3    = (1 << 5),
	};
	int op = arg;
	int cmd = op & FUTEX_CMD_MASK;
	size_t printed = 0;

	switch (cmd) {
#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
	P_FUTEX_OP(WAIT);	    *arg_mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAKE);	    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(FD);		    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(REQUEUE);	    *arg_mask |= SCF_VAL3|SCF_TIMEOUT;		  break;
	P_FUTEX_OP(CMP_REQUEUE);    *arg_mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(CMP_REQUEUE_PI); *arg_mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(WAKE_OP);							  break;
	P_FUTEX_OP(LOCK_PI);	    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(UNLOCK_PI);	    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(TRYLOCK_PI);	    *arg_mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAIT_BITSET);    *arg_mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAKE_BITSET);    *arg_mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAIT_REQUEUE_PI);						  break;
	default: printed = scnprintf(bf, size, "%#x", cmd);			  break;
	}

	if (op & FUTEX_PRIVATE_FLAG)
		printed += scnprintf(bf + printed, size - printed, "|PRIV");

	if (op & FUTEX_CLOCK_REALTIME)
		printed += scnprintf(bf + printed, size - printed, "|CLKRT");

	return printed;
}

#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op

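/*
 * open(2)/openat(2) flags.  When O_CREAT is not present, the following
 * 'mode' argument is meaningless, so its bit is set in 'arg_mask' to keep
 * it out of the output.
 */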
static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
						unsigned long arg,
						u8 arg_idx, u8 *arg_mask)
{
	int printed = 0, flags = arg;

	if (!(flags & O_CREAT))
		*arg_mask |= 1 << (arg_idx + 1); /* Mask the mode parm */

	if (flags == 0)
		return scnprintf(bf, size, "RDONLY");
#define P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(APPEND);
	P_FLAG(ASYNC);
	P_FLAG(CLOEXEC);
	P_FLAG(CREAT);
	P_FLAG(DIRECT);
	P_FLAG(DIRECTORY);
	P_FLAG(EXCL);
	P_FLAG(LARGEFILE);
	P_FLAG(NOATIME);
	P_FLAG(NOCTTY);
#ifdef O_NONBLOCK
	P_FLAG(NONBLOCK);
#elif O_NDELAY
	P_FLAG(NDELAY);
#endif
#ifdef O_PATH
	P_FLAG(PATH);
#endif
	P_FLAG(RDWR);
#ifdef O_DSYNC
	if ((flags & O_SYNC) == O_SYNC)
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
	else {
		P_FLAG(DSYNC);
	}
#else
	P_FLAG(SYNC);
#endif
	P_FLAG(TRUNC);
	P_FLAG(WRONLY);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags

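/*
 * Per-syscall formatting hints: error-message decoding, timeout/hex return
 * values, an optional alias for the tracepoint name and per-argument
 * pretty-printers.  The array must stay sorted by ->name, since
 * syscall_fmt__find() looks entries up with bsearch().
 */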
static struct syscall_fmt {
	const char *name;
	const char *alias;
	size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 arg_idx, u8 *arg_mask);
	bool errmsg;
	bool timeout;
	bool hexret;
} syscall_fmts[] = {
	{ .name = "access", .errmsg = true, },
	{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name = "brk", .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
	{ .name = "connect", .errmsg = true, },
	{ .name = "fstat", .errmsg = true, .alias = "newfstat", },
	{ .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
	{ .name = "futex", .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
	{ .name = "ioctl", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
	{ .name = "lseek", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_WHENCE, /* whence */ }, },
	{ .name = "lstat", .errmsg = true, .alias = "newlstat", },
	{ .name = "madvise", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
			     [2] = SCA_MADV_BHV, /* behavior */ }, },
	{ .name = "mmap", .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
			     [2] = SCA_MMAP_PROT, /* prot */
			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
	{ .name = "mprotect", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
			     [2] = SCA_MMAP_PROT, /* prot */ }, },
	{ .name = "mremap", .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
			     [4] = SCA_HEX, /* new_addr */ }, },
	{ .name = "munmap", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name = "open", .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name = "open_by_handle_at", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name = "openat", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name = "poll", .errmsg = true, .timeout = true, },
	{ .name = "ppoll", .errmsg = true, .timeout = true, },
	{ .name = "pread", .errmsg = true, .alias = "pread64", },
	{ .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
	{ .name = "read", .errmsg = true, },
	{ .name = "recvfrom", .errmsg = true, },
	{ .name = "select", .errmsg = true, .timeout = true, },
	{ .name = "socket", .errmsg = true, },
	{ .name = "stat", .errmsg = true, .alias = "newstat", },
	{ .name = "uname", .errmsg = true, .alias = "newuname", },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

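/*
 * Runtime state for one syscall id: tracepoint format, name, per-argument
 * formatters and whether the event qualifier filtered it out.
 */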
struct syscall {
	struct event_format *tp_format;
	const char *name;
	bool filtered;
	struct syscall_fmt *fmt;
	size_t (**arg_scnprintf)(char *bf, size_t size,
				 unsigned long arg, u8 arg_idx, u8 *args_mask);
};

static size_t fprintf_duration(unsigned long t, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

struct thread_trace {
	u64 entry_time;
	u64 exit_time;
	bool entry_pending;
	unsigned long nr_events;
	char *entry_str;
	double runtime_ms;
};

static struct thread_trace *thread_trace__new(void)
{
	return zalloc(sizeof(struct thread_trace));
}

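/*
 * Lazily allocate the per-thread state hanging off thread->priv and count
 * the events seen for that thread; on allocation failure warn and drop.
 */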
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread->priv == NULL)
		thread->priv = thread_trace__new();

	if (thread->priv == NULL)
		goto fail;

	ttrace = thread->priv;
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

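/*
 * Global tool state: the lazily filled syscall table, record options, the
 * host machine, output stream, event qualifier and duration filter.
 */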
struct trace {
	struct perf_tool tool;
	int audit_machine;
	struct {
		int max;
		struct syscall *table;
	} syscalls;
	struct perf_record_opts opts;
	struct machine host;
	u64 base_time;
	FILE *output;
	unsigned long nr_events;
	struct strlist *ev_qualifier;
	bool not_ev_qualifier;
	struct intlist *tid_list;
	struct intlist *pid_list;
	bool sched;
	bool multiple_threads;
	double duration_filter;
	double runtime_ms;
};

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
	return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads)
		printed += fprintf(fp, "%d ", thread->tid);

	return printed;
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event);
	default:
		ret = machine__process_event(machine, event);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init();

	if (err)
		return err;

	machine__init(&trace->host, "", HOST_KERNEL_ID);
	machine__create_kernel_maps(&trace->host);

	if (perf_target__has_task(&trace->opts.target)) {
		err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
							trace__tool_process,
							&trace->host);
	} else {
		err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
						     &trace->host);
	}

	if (err)
		symbol__exit();

	return err;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct format_field *field;
	int idx = 0;

	sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
	if (sc->arg_scnprintf == NULL)
		return -1;

	for (field = sc->tp_format->format.fields->next; field; field = field->next) {
		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
		++idx;
	}

	return 0;
}

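/*
 * Fill in trace->syscalls.table[id] on first use: grow the table if needed,
 * resolve the name via libaudit, honour the -e/--expr qualifier and attach
 * the syscalls:sys_enter_<name> tracepoint format (or its alias).
 */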
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	if (trace->ev_qualifier) {
		bool in = strlist__find(trace->ev_qualifier, name) != NULL;

		if (!(in ^ trace->not_ev_qualifier)) {
			sc->filtered = true;
			/*
			 * No need to read the tracepoint information since this will be
			 * filtered out.
			 */
			return 0;
		}
	}

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	if (sc->tp_format == NULL)
		return -1;

	return syscall__set_arg_fmts(sc);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned long *args)
{
	int i = 0;
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;
		u8 mask = 0, bit = 1;

		for (field = sc->tp_format->format.fields->next; field;
		     field = field->next, ++i, bit <<= 1) {
			if (mask & bit)
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);

			if (sc->arg_scnprintf && sc->arg_scnprintf[i]) {
				printed += sc->arg_scnprintf[i](bf + printed, size - printed,
								args[i], i, &mask);
			} else {
				printed += scnprintf(bf + printed, size - printed,
						     "%ld", args[i]);
			}
		}
	} else {
		while (i < 6) {
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);

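/*
 * Map the raw_syscalls "id" field to its struct syscall, reading the
 * syscall info on first use.  Negative ids (see the XXX note below) and
 * ids whose info can't be read return NULL.
 */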
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}

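/*
 * raw_syscalls:sys_enter handler: format "name(args" into the per-thread
 * entry_str and defer printing until the matching sys_exit, except for
 * exit/exit_group, which never return.
 */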
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->pid,
					 sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		fprintf(trace->output, "Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		}
	} else
		ttrace->entry_pending = true;

	return 0;
}

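/*
 * raw_syscalls:sys_exit handler: compute the duration, apply the --duration
 * filter, print the deferred entry (or "... [continued]") and decode the
 * return value according to the syscall_fmt flags.
 */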
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->pid,
					 sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	if (sc->fmt == NULL) {
signed_print:
		fprintf(trace->output, ") = %d", ret);
	} else if (ret < 0 && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#x", ret);
	else
		goto signed_print;

	fputc('\n', trace->output);
out:
	ttrace->entry_pending = false;
	return 0;
}

static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(&trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		evsel->name,
		perf_evsel__strval(evsel, sample, "comm"),
		(pid_t)perf_evsel__intval(evsel, sample, "pid"),
		runtime,
		perf_evsel__intval(evsel, sample, "vruntime"));
	return 0;
}

static bool skip_sample(struct trace *trace, struct perf_sample *sample)
{
	if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
	    (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
		return false;

	if (trace->pid_list || trace->tid_list)
		return true;

	return false;
}

static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event __maybe_unused,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	int err = 0;

	tracepoint_handler handler = evsel->handler.func;

	if (skip_sample(trace, sample))
		return 0;

	if (trace->base_time == 0)
		trace->base_time = sample->time;

	if (handler)
		handler(trace, evsel, sample);

	return err;
}

static bool
perf_session__has_tp(struct perf_session *session, const char *name)
{
	struct perf_evsel *evsel;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);

	return evsel != NULL;
}

static int parse_target_str(struct trace *trace)
{
	if (trace->opts.target.pid) {
		trace->pid_list = intlist__new(trace->opts.target.pid);
		if (trace->pid_list == NULL) {
			pr_err("Error parsing process id string\n");
			return -EINVAL;
		}
	}

	if (trace->opts.target.tid) {
		trace->tid_list = intlist__new(trace->opts.target.tid);
		if (trace->tid_list == NULL) {
			pr_err("Error parsing thread id string\n");
			return -EINVAL;
		}
	}

	return 0;
}

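/*
 * Live mode: add the raw_syscalls sys_enter/sys_exit tracepoints (plus
 * sched_stat_runtime with --sched), open and mmap the evlist, optionally
 * fork the workload, then poll the ring buffers and dispatch each sample
 * to its tracepoint handler until interrupted.
 */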
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		fprintf(trace->output, "Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime)) {
		fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_maps;
	}

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, false);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_maps;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_maps;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(trace, &trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
					perf_evsel__name(evsel), sample.tid,
					sample.cpu, sample.raw_size);
				continue;
			}

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);

			if (done)
				goto out_unmap_evlist;
		}
	}

	if (trace->nr_events == before) {
		if (done)
			goto out_unmap_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_unmap_evlist:
	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}

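/*
 * Replay mode (-i): process an existing perf.data file through a
 * perf_session, wiring up the same sys_enter/sys_exit handlers.
 */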
static int trace__replay(struct trace *trace)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "raw_syscalls:sys_enter", trace__sys_enter, },
		{ "raw_syscalls:sys_exit", trace__sys_exit, },
	};

	struct perf_session *session;
	int err = -1;

	trace->tool.sample = trace__process_sample;
	trace->tool.mmap = perf_event__process_mmap;
	trace->tool.comm = perf_event__process_comm;
	trace->tool.exit = perf_event__process_exit;
	trace->tool.fork = perf_event__process_fork;
	trace->tool.attr = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id = perf_event__process_build_id;

	trace->tool.ordered_samples = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	if (symbol__init() < 0)
		return -1;

	session = perf_session__new(input_name, O_RDONLY, 0, false,
				    &trace->tool);
	if (session == NULL)
		return -ENOMEM;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
		pr_err("Data file does not have raw_syscalls:sys_enter events\n");
		goto out;
	}

	if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
		pr_err("Data file does not have raw_syscalls:sys_exit events\n");
		goto out;
	}

	err = parse_target_str(trace);
	if (err != 0)
		goto out;

	setup_pager();

	err = perf_session__process_events(session, &trace->tool);
	if (err)
		pr_err("Failed to process events, error %d", err);

out:
	perf_session__delete(session);
	return err;
}

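/*
 * --sched summary: one line per traced thread with its event count, share
 * of all events and accumulated sched_stat_runtime in milliseconds.
 */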
static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed = fprintf(fp, "\n _____________________________________________________________________\n");
	printed += fprintf(fp, " __) Summary of events (__\n\n");
	printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
	printed += fprintf(fp, " _____________________________________________________________________\n\n");

	return printed;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;

	for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
		struct thread *thread = rb_entry(nd, struct thread, rb_node);
		struct thread_trace *ttrace = thread->priv;
		const char *color;
		double ratio;

		if (ttrace == NULL)
			continue;

		ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

		color = PERF_COLOR_NORMAL;
		if (ratio > 50.0)
			color = PERF_COLOR_RED;
		else if (ratio > 25.0)
			color = PERF_COLOR_GREEN;
		else if (ratio > 5.0)
			color = PERF_COLOR_YELLOW;

		printed += color_fprintf(fp, color, "%20s", thread->comm);
		printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
		printed += color_fprintf(fp, color, "%5.1f%%", ratio);
		printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
	}

	return printed;
}

static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}

static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}

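/*
 * Entry point for 'perf trace': parse options, set up the output file and
 * the event qualifier, validate the target and run either live or replay
 * mode, printing the thread summary at the end when --sched was given.
 */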
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			.max = -1,
		},
		.opts = {
			.target = {
				.uid = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay = true,
			.mmap_pages = 1024,
		},
		.output = stdout,
	};
	const char *output_name = NULL;
	const char *ev_qualifier_str = NULL;
	const struct option trace_options[] = {
	OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
		   "list of events to trace"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_END()
	};
	int err;
	char bf[BUFSIZ];

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	if (ev_qualifier_str != NULL) {
		const char *s = ev_qualifier_str;

		trace.not_ev_qualifier = *s == '!';
		if (trace.not_ev_qualifier)
			++s;
		trace.ev_qualifier = strlist__new(true, s);
		if (trace.ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier",
			      trace.output);
			err = -ENOMEM;
			goto out_close;
		}
	}

	err = perf_target__validate(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

	if (trace.sched && !err)
		trace__fprintf_thread_summary(&trace, trace.output);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}
  1056. }