builtin-trace.c

#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"

#include <libaudit.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <linux/futex.h>

/* For older distros: */
#ifndef MAP_STACK
# define MAP_STACK		0x20000
#endif

#ifndef MADV_HWPOISON
# define MADV_HWPOISON		100
#endif

#ifndef MADV_MERGEABLE
# define MADV_MERGEABLE		12
#endif

#ifndef MADV_UNMERGEABLE
# define MADV_UNMERGEABLE	13
#endif
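
/*
 * Per-argument state handed to the pretty printers below: the raw argument
 * value, an optional formatter parameter (e.g. a strarray), the argument
 * index, and a bitmask of arguments the formatter wants suppressed in the
 * output (see syscall__scnprintf_args()).
 */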
struct syscall_arg {
	unsigned long val;
	void	      *parm;
	u8	      idx;
	u8	      mask;
};

struct strarray {
	int	    nr_entries;
	const char **entries;
};

#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}
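
/*
 * Prints an integer argument as a symbolic name taken from the strarray
 * passed in arg->parm (e.g. strarray__itimers, defined below via
 * DEFINE_STRARRAY(itimers)), falling back to the numeric value when the
 * index is out of range.
 */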
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int idx = arg->val;
	struct strarray *sa = arg->parm;

	if (idx < 0 || idx >= sa->nr_entries)
		return scnprintf(bf, size, "%d", idx);

	return scnprintf(bf, size, "%s", sa->entries[idx]);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
					 struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

#define SCA_HEX syscall_arg__scnprintf_hex

static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, prot = arg->val;

	if (prot == PROT_NONE)
		return scnprintf(bf, size, "NONE");
#define P_MMAP_PROT(n) \
	if (prot & PROT_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		prot &= ~PROT_##n; \
	}

	P_MMAP_PROT(EXEC);
	P_MMAP_PROT(READ);
	P_MMAP_PROT(WRITE);
#ifdef PROT_SEM
	P_MMAP_PROT(SEM);
#endif
	P_MMAP_PROT(GROWSDOWN);
	P_MMAP_PROT(GROWSUP);
#undef P_MMAP_PROT

	if (prot)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);

	return printed;
}

#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot

static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define P_MMAP_FLAG(n) \
	if (flags & MAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MAP_##n; \
	}

	P_MMAP_FLAG(SHARED);
	P_MMAP_FLAG(PRIVATE);
#ifdef MAP_32BIT
	P_MMAP_FLAG(32BIT);
#endif
	P_MMAP_FLAG(ANONYMOUS);
	P_MMAP_FLAG(DENYWRITE);
	P_MMAP_FLAG(EXECUTABLE);
	P_MMAP_FLAG(FILE);
	P_MMAP_FLAG(FIXED);
	P_MMAP_FLAG(GROWSDOWN);
#ifdef MAP_HUGETLB
	P_MMAP_FLAG(HUGETLB);
#endif
	P_MMAP_FLAG(LOCKED);
	P_MMAP_FLAG(NONBLOCK);
	P_MMAP_FLAG(NORESERVE);
	P_MMAP_FLAG(POPULATE);
	P_MMAP_FLAG(STACK);
#ifdef MAP_UNINITIALIZED
	P_MMAP_FLAG(UNINITIALIZED);
#endif
#undef P_MMAP_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags

static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
						      struct syscall_arg *arg)
{
	int behavior = arg->val;

	switch (behavior) {
#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
	P_MADV_BHV(NORMAL);
	P_MADV_BHV(RANDOM);
	P_MADV_BHV(SEQUENTIAL);
	P_MADV_BHV(WILLNEED);
	P_MADV_BHV(DONTNEED);
	P_MADV_BHV(REMOVE);
	P_MADV_BHV(DONTFORK);
	P_MADV_BHV(DOFORK);
	P_MADV_BHV(HWPOISON);
#ifdef MADV_SOFT_OFFLINE
	P_MADV_BHV(SOFT_OFFLINE);
#endif
	P_MADV_BHV(MERGEABLE);
	P_MADV_BHV(UNMERGEABLE);
#ifdef MADV_HUGEPAGE
	P_MADV_BHV(HUGEPAGE);
#endif
#ifdef MADV_NOHUGEPAGE
	P_MADV_BHV(NOHUGEPAGE);
#endif
#ifdef MADV_DONTDUMP
	P_MADV_BHV(DONTDUMP);
#endif
#ifdef MADV_DODUMP
	P_MADV_BHV(DODUMP);
#endif
#undef P_MADV_BHV
	default: break;
	}

	return scnprintf(bf, size, "%#x", behavior);
}

#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
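
/*
 * Decodes the futex op argument and, depending on the cmd, flags the
 * arguments that are meaningless for it (val3, uaddr2, timeout, ...) in
 * arg->mask so that syscall__scnprintf_args() skips them.
 */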
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
{
	enum syscall_futex_args {
		SCF_UADDR   = (1 << 0),
		SCF_OP	    = (1 << 1),
		SCF_VAL	    = (1 << 2),
		SCF_TIMEOUT = (1 << 3),
		SCF_UADDR2  = (1 << 4),
		SCF_VAL3    = (1 << 5),
	};
	int op = arg->val;
	int cmd = op & FUTEX_CMD_MASK;
	size_t printed = 0;

	switch (cmd) {
#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
	P_FUTEX_OP(WAIT);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAKE);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(FD);		    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(REQUEUE);	    arg->mask |= SCF_VAL3|SCF_TIMEOUT;		  break;
	P_FUTEX_OP(CMP_REQUEUE);    arg->mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(WAKE_OP);							  break;
	P_FUTEX_OP(LOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(UNLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(TRYLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAIT_BITSET);    arg->mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAKE_BITSET);    arg->mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAIT_REQUEUE_PI);						  break;
	default: printed = scnprintf(bf, size, "%#x", cmd);			  break;
	}

	if (op & FUTEX_PRIVATE_FLAG)
		printed += scnprintf(bf + printed, size - printed, "|PRIV");

	if (op & FUTEX_CLOCK_REALTIME)
		printed += scnprintf(bf + printed, size - printed, "|CLKRT");

	return printed;
}

#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences);

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
	"F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
	"F_GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds);

static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (!(flags & O_CREAT))
		arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */

	if (flags == 0)
		return scnprintf(bf, size, "RDONLY");
#define P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(APPEND);
	P_FLAG(ASYNC);
	P_FLAG(CLOEXEC);
	P_FLAG(CREAT);
	P_FLAG(DIRECT);
	P_FLAG(DIRECTORY);
	P_FLAG(EXCL);
	P_FLAG(LARGEFILE);
	P_FLAG(NOATIME);
	P_FLAG(NOCTTY);
#ifdef O_NONBLOCK
	P_FLAG(NONBLOCK);
#elif O_NDELAY
	P_FLAG(NDELAY);
#endif
#ifdef O_PATH
	P_FLAG(PATH);
#endif
	P_FLAG(RDWR);
#ifdef O_DSYNC
	if ((flags & O_SYNC) == O_SYNC)
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
	else {
		P_FLAG(DSYNC);
	}
#else
	P_FLAG(SYNC);
#endif
	P_FLAG(TRUNC);
	P_FLAG(WRONLY);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
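
/*
 * Per-syscall formatting hints: an optional tracepoint alias, per-argument
 * pretty printers plus their parameters, and how to print the return value.
 * syscall_fmt__find() looks entries up with bsearch(), so the array must be
 * kept sorted by name.
 */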
static struct syscall_fmt {
	const char *name;
	const char *alias;
	size_t	   (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
	void	   *arg_parm[6];
	bool	   errmsg;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",	    .errmsg = true, },
	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name	    = "brk",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
	{ .name	    = "connect",    .errmsg = true, },
	{ .name	    = "fcntl",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
	  .arg_parm	 = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat", },
	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat", },
	{ .name	    = "futex",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
	{ .name	    = "getitimer",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* which */ },
	  .arg_parm	 = { [0] = &strarray__itimers, /* which */ }, },
	{ .name	    = "ioctl",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
	{ .name	    = "lseek",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
	  .arg_parm	 = { [2] = &strarray__whences, /* whence */ }, },
	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat", },
	{ .name	    = "madvise",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX,	 /* start */
			     [2] = SCA_MADV_BHV, /* behavior */ }, },
	{ .name	    = "mmap",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
			     [2] = SCA_MMAP_PROT, /* prot */
			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
	{ .name	    = "mprotect",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
			     [2] = SCA_MMAP_PROT, /* prot */ }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
			     [4] = SCA_HEX, /* new_addr */ }, },
	{ .name	    = "munmap",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "open",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "open_by_handle_at", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "openat",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "pread",	    .errmsg = true, .alias = "pread64", },
	{ .name	    = "pwrite",	    .errmsg = true, .alias = "pwrite64", },
	{ .name	    = "read",	    .errmsg = true, },
	{ .name	    = "recvfrom",   .errmsg = true, },
	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
	{ .name	    = "setitimer",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* which */ },
	  .arg_parm	 = { [0] = &strarray__itimers, /* which */ }, },
	{ .name	    = "socket",	    .errmsg = true, },
	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
	{ .name	    = "uname",	    .errmsg = true, .alias = "newuname", },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
	struct event_format *tp_format;
	const char	    *name;
	bool		    filtered;
	struct syscall_fmt  *fmt;
	size_t		    (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	void		    **arg_parm;
};

static size_t fprintf_duration(unsigned long t, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

struct thread_trace {
	u64		  entry_time;
	u64		  exit_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	char		  *entry_str;
	double		  runtime_ms;
};

static struct thread_trace *thread_trace__new(void)
{
	return zalloc(sizeof(struct thread_trace));
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread->priv == NULL)
		thread->priv = thread_trace__new();

	if (thread->priv == NULL)
		goto fail;

	ttrace = thread->priv;
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
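
/*
 * Top-level state for a trace session: the perf_tool callbacks, the lazily
 * built syscall table, the host machine representation, the output stream
 * and the event, pid/tid and duration filters.
 */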
struct trace {
	struct perf_tool	tool;
	int			audit_machine;
	struct {
		int		max;
		struct syscall  *table;
	} syscalls;
	struct perf_record_opts opts;
	struct machine		host;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;
	bool			not_ev_qualifier;
	struct intlist		*tid_list;
	struct intlist		*pid_list;
	bool			sched;
	bool			multiple_threads;
	double			duration_filter;
	double			runtime_ms;
};

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads)
		printed += fprintf(fp, "%d ", thread->tid);

	return printed;
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event);
	default:
		ret = machine__process_event(machine, event);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init();

	if (err)
		return err;

	machine__init(&trace->host, "", HOST_KERNEL_ID);
	machine__create_kernel_maps(&trace->host);

	if (perf_target__has_task(&trace->opts.target)) {
		err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
							trace__tool_process,
							&trace->host);
	} else {
		err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
						     &trace->host);
	}

	if (err)
		symbol__exit();

	return err;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct format_field *field;
	int idx = 0;

	sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
	if (sc->arg_scnprintf == NULL)
		return -1;

	if (sc->fmt)
		sc->arg_parm = sc->fmt->arg_parm;

	for (field = sc->tp_format->format.fields->next; field; field = field->next) {
		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
		++idx;
	}

	return 0;
}
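
/*
 * Lazily grows trace->syscalls.table to cover a newly seen syscall id,
 * resolves its name via audit_syscall_to_name(), applies the -e event
 * qualifier and attaches the syscalls:sys_enter_<name> tracepoint format.
 */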
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	if (trace->ev_qualifier) {
		bool in = strlist__find(trace->ev_qualifier, name) != NULL;

		if (!(in ^ trace->not_ev_qualifier)) {
			sc->filtered = true;
			/*
			 * No need to read the tracepoint information since
			 * this syscall will be filtered out.
			 */
			return 0;
		}
	}

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	if (sc->tp_format == NULL)
		return -1;

	return syscall__set_arg_fmts(sc);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned long *args)
{
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;
		u8 bit = 1;
		struct syscall_arg arg = {
			.idx  = 0,
			.mask = 0,
		};

		for (field = sc->tp_format->format.fields->next; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);

			if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
				arg.val = args[arg.idx];
				if (sc->arg_parm)
					arg.parm = sc->arg_parm[arg.idx];
				printed += sc->arg_scnprintf[arg.idx](bf + printed,
								      size - printed, &arg);
			} else {
				printed += scnprintf(bf + printed, size - printed,
						     "%ld", args[arg.idx]);
			}
		}
	} else {
		int i = 0;

		while (i < 6) {
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);

static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
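
/*
 * sys_enter formats the syscall name and arguments into ttrace->entry_str
 * and leaves entry_pending set; the matching sys_exit then prints the
 * buffered entry together with the duration and the decoded return value.
 * exit and exit_group are printed right away since no exit event follows.
 */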
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->pid,
					 sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		fprintf(trace->output, "Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		}
	} else
		ttrace->entry_pending = true;

	return 0;
}

static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->pid,
					 sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	if (sc->fmt == NULL) {
signed_print:
		fprintf(trace->output, ") = %d", ret);
	} else if (ret < 0 && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#x", ret);
	else
		goto signed_print;

	fputc('\n', trace->output);
out:
	ttrace->entry_pending = false;
	return 0;
}

static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(&trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		evsel->name,
		perf_evsel__strval(evsel, sample, "comm"),
		(pid_t)perf_evsel__intval(evsel, sample, "pid"),
		runtime,
		perf_evsel__intval(evsel, sample, "vruntime"));
	return 0;
}

static bool skip_sample(struct trace *trace, struct perf_sample *sample)
{
	if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
	    (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
		return false;

	if (trace->pid_list || trace->tid_list)
		return true;

	return false;
}

static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event __maybe_unused,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	int err = 0;

	tracepoint_handler handler = evsel->handler.func;

	if (skip_sample(trace, sample))
		return 0;

	if (trace->base_time == 0)
		trace->base_time = sample->time;

	if (handler)
		handler(trace, evsel, sample);

	return err;
}

static bool
perf_session__has_tp(struct perf_session *session, const char *name)
{
	struct perf_evsel *evsel;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);

	return evsel != NULL;
}

static int parse_target_str(struct trace *trace)
{
	if (trace->opts.target.pid) {
		trace->pid_list = intlist__new(trace->opts.target.pid);
		if (trace->pid_list == NULL) {
			pr_err("Error parsing process id string\n");
			return -EINVAL;
		}
	}

	if (trace->opts.target.tid) {
		trace->tid_list = intlist__new(trace->opts.target.tid);
		if (trace->tid_list == NULL) {
			pr_err("Error parsing thread id string\n");
			return -EINVAL;
		}
	}

	return 0;
}
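
/*
 * Live mode: adds the raw_syscalls:sys_enter/sys_exit tracepoints (plus
 * sched:sched_stat_runtime when --sched is given), optionally forks the
 * workload, mmaps the ring buffers and consumes events until interrupted
 * or until the workload exits.
 */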
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		fprintf(trace->output, "Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime)) {
		fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_maps;
	}

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, false);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_maps;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_maps;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(trace, &trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
					perf_evsel__name(evsel), sample.tid,
					sample.cpu, sample.raw_size);
				continue;
			}

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);

			if (done)
				goto out_unmap_evlist;
		}
	}

	if (trace->nr_events == before) {
		if (done)
			goto out_unmap_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_unmap_evlist:
	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
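
/*
 * Replay mode (-i): processes an existing perf.data file with the same
 * sys_enter/sys_exit handlers instead of opening live events.
 */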
static int trace__replay(struct trace *trace)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "raw_syscalls:sys_enter", trace__sys_enter, },
		{ "raw_syscalls:sys_exit",  trace__sys_exit, },
	};

	struct perf_session *session;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;

	trace->tool.ordered_samples = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	if (symbol__init() < 0)
		return -1;

	session = perf_session__new(input_name, O_RDONLY, 0, false,
				    &trace->tool);
	if (session == NULL)
		return -ENOMEM;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
		pr_err("Data file does not have raw_syscalls:sys_enter events\n");
		goto out;
	}

	if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
		pr_err("Data file does not have raw_syscalls:sys_exit events\n");
		goto out;
	}

	err = parse_target_str(trace);
	if (err != 0)
		goto out;

	setup_pager();

	err = perf_session__process_events(session, &trace->tool);
	if (err)
		pr_err("Failed to process events, error %d", err);

out:
	perf_session__delete(session);

	return err;
}

static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed  = fprintf(fp, "\n _____________________________________________________________________\n");
	printed += fprintf(fp, " __) Summary of events (__\n\n");
	printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
	printed += fprintf(fp, " _____________________________________________________________________\n\n");

	return printed;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;

	for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
		struct thread *thread = rb_entry(nd, struct thread, rb_node);
		struct thread_trace *ttrace = thread->priv;
		const char *color;
		double ratio;

		if (ttrace == NULL)
			continue;

		ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
		color = PERF_COLOR_NORMAL;

		if (ratio > 50.0)
			color = PERF_COLOR_RED;
		else if (ratio > 25.0)
			color = PERF_COLOR_GREEN;
		else if (ratio > 5.0)
			color = PERF_COLOR_YELLOW;

		printed += color_fprintf(fp, color, "%20s", thread->comm);
		printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
		printed += color_fprintf(fp, color, "%5.1f%%", ratio);
		printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
	}

	return printed;
}

static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);

	return 0;
}

static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}
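
/*
 * Entry point for 'perf trace': parses the options, sets up the event
 * qualifier (-e, with a leading '!' inverting the selection), validates the
 * target and then either replays a perf.data file (-i) or runs live.
 */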
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			.max = -1,
		},
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay      = true,
			.mmap_pages    = 1024,
		},
		.output = stdout,
	};
	const char *output_name = NULL;
	const char *ev_qualifier_str = NULL;
	const struct option trace_options[] = {
	OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
		    "list of events to trace"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_END()
	};
	int err;
	char bf[BUFSIZ];

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	if (ev_qualifier_str != NULL) {
		const char *s = ev_qualifier_str;

		trace.not_ev_qualifier = *s == '!';
		if (trace.not_ev_qualifier)
			++s;
		trace.ev_qualifier = strlist__new(true, s);
		if (trace.ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier",
			      trace.output);
			err = -ENOMEM;
			goto out_close;
		}
	}

	err = perf_target__validate(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

	if (trace.sched && !err)
		trace__fprintf_thread_summary(&trace, trace.output);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}