builtin-report.c

#include "util/util.h"

#include "builtin.h"

#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char const *input_name = "perf.data";
static char *vmlinux = NULL;
static char *sort_order = "comm,dso";
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int dump_trace = 0;
static int verbose;
static int full_paths;

static unsigned long page_size;
static unsigned long mmap_window = 32;

const char *perf_event_names[] = {
	[PERF_EVENT_MMAP]   = " PERF_EVENT_MMAP",
	[PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP",
	[PERF_EVENT_COMM]   = " PERF_EVENT_COMM",
};

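/*
 * Event records as they appear in the perf.data stream.
 */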
struct ip_event {
	struct perf_event_header header;
	__u64 ip;
	__u32 pid, tid;
};

struct mmap_event {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

typedef union event_union {
	struct perf_event_header header;
	struct ip_event ip;
	struct mmap_event mmap;
	struct comm_event comm;
} event_t;

static LIST_HEAD(dsos);
static struct dso *kernel_dso;

static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}

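/*
 * Find a DSO by name in the global list, creating it and loading its
 * symbol table on first use.
 */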
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL);
	if (nr < 0) {
		fprintf(stderr, "Failed to open: %s\n", name);
		goto out_delete_dso;
	}

	if (!nr && verbose) {
		fprintf(stderr,
			"No symbols found in: %s, maybe install a debug package?\n",
			name);
	}

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}

static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}

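/*
 * Create the "[kernel]" DSO and load kernel symbols, from the vmlinux
 * image given with -k when available.
 */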
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL);
	if (err) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	return err;
}

/* Length of the common prefix of @pathname and @cwd. */
static int strcommon(const char *pathname, const char *cwd, int cwdlen)
{
	int n = 0;

	while (n < cwdlen && pathname[n] == cwd[n])
		++n;

	return n;
}

struct map {
	struct list_head node;
	uint64_t start;
	uint64_t end;
	uint64_t pgoff;
	struct dso *dso;
};

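/*
 * Build a map from a PERF_EVENT_MMAP record; unless --full-paths is
 * given, the filename is shortened relative to the current directory.
 */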
static struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];

		if (cwd) {
			int n = strcommon(filename, cwd, cwdlen);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		self->start = event->start;
		self->end = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

struct thread {
	struct rb_node rb_node;
	struct list_head maps;
	pid_t pid;
	char *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(30);
		if (self->comm)
			sprintf(self->comm, ":%d", pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
	/* Replace the default ":<pid>" name allocated in thread__new(). */
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

static struct rb_root threads;

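/*
 * Look up the thread for @pid in the rbtree, creating and inserting a
 * new one if it is not there yet.
 */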
static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid)
			return th;

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
	}
	return th;
}

static void thread__insert_map(struct thread *self, struct map *map)
{
	list_add_tail(&map->node, &self->maps);
}

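/* Find the map in this thread's address space that covers @ip. */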
static struct map *thread__find_map(struct thread *self, uint64_t ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

/*
 * histogram, sorted on item, collects counts
 */
static struct rb_root hist;

struct hist_entry {
	struct rb_node rb_node;

	struct thread *thread;
	struct map *map;
	struct dso *dso;
	struct symbol *sym;

	uint64_t ip;
	char level;

	uint32_t count;
};

/*
 * configurable sorting bits
 */
struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	size_t (*print)(FILE *fp, struct hist_entry *);
};

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = " Command: Pid ",
	.cmp = sort__thread_cmp,
	.print = sort__thread_print,
};

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, " %16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header = " Command",
	.cmp = sort__comm_cmp,
	.print = sort__comm_print,
};

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, " %-25s", self->dso->name);

	return fprintf(fp, " %016llx", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = " Shared Object ",
	.cmp = sort__dso_cmp,
	.print = sort__dso_print,
};

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, " %#018llx", (__u64)self->ip);

	if (self->dso)
		ret += fprintf(fp, " %s: ", self->dso->name);
	else
		ret += fprintf(fp, " %#016llx: ", (__u64)self->ip);

	if (self->sym)
		ret += fprintf(fp, "%s", self->sym->name);
	else
		ret += fprintf(fp, "%#016llx", (__u64)self->ip);

	return ret;
}

static struct sort_entry sort_sym = {
	.header = " Shared Object: Symbol",
	.cmp = sort__sym_cmp,
	.print = sort__sym_print,
};

struct sort_dimension {
	char *name;
	struct sort_entry *entry;
	int taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",    .entry = &sort_thread, },
	{ .name = "comm",   .entry = &sort_comm, },
	{ .name = "dso",    .entry = &sort_dso, },
	{ .name = "symbol", .entry = &sort_sym, },
};

static LIST_HEAD(hist_entry__sort_list);

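/*
 * Parse the -s/--sort key list and activate the matching sort
 * dimensions, in the order they were given.
 */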
static int sort_dimension__add(char *tok)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strcmp(tok, sd->name))
			continue;

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp))
		sort_dimension__add(tok);

	free(str);
}

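/* Compare two histogram entries over all active sort keys, in order. */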
static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (total_samples) {
		ret = fprintf(fp, " %5.2f%%",
				(self->count * 100.0) / total_samples);
	} else
		ret = fprintf(fp, "%12d ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list)
		ret += se->print(fp, self);

	ret += fprintf(fp, "\n");

	return ret;
}

/*
 * collect histogram counts
 */
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, uint64_t ip, char level)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread = thread,
		.map = map,
		.dso = dso,
		.sym = sym,
		.ip = ip,
		.level = level,
		.count = 1,
	};
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);
		if (!cmp) {
			he->count++;
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

/*
 * reverse the map, sort on count.
 */
static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
	struct rb_node *next = rb_first(&hist);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		output__insert_entry(n);
	}
}

static size_t output__fprintf(FILE *fp, uint64_t total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;

	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	list_for_each_entry(se, &hist_entry__sort_list, list)
		fprintf(fp, " %s", se->header);
	fprintf(fp, "\n");

	fprintf(fp, "# ........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int i;

		fprintf(fp, " ");
		for (i = 0; i < strlen(se->header)-1; i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	return ret;
}

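/*
 * Pre-create a thread for pid 0 named "[idle]", so samples hitting the
 * idle task report a sensible command name.
 */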
static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
	    thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}

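/*
 * Main report pass: mmap perf.data in page_size * mmap_window sized
 * chunks, walk the event records to build the histogram, then resort
 * it by count and print the result.
 */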
static int __cmd_report(void)
{
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat stat;
	char *buf;
	event_t *event;
	int ret, rc = EXIT_FAILURE;
	uint32_t size;
	unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0;
	char cwd[PATH_MAX], *cwdp = cwd;
	int cwdlen;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(cwd, sizeof(cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwdp = NULL;
		cwdlen = 0;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;
	if (!size)
		goto broken_event;

	if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
		char level;
		int show = 0;
		struct dso *dso = NULL;
		struct thread *thread = threads__findnew(event->ip.pid);
		uint64_t ip = event->ip.ip;
		struct map *map = NULL;

		if (dump_trace) {
			fprintf(stderr, "%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
				(void *)(offset + head),
				(void *)(long)(event->header.size),
				event->header.misc,
				event->ip.pid,
				(void *)(long)ip);
		}

		if (thread == NULL) {
			fprintf(stderr, "problem processing %d event, skipping it.\n",
				event->header.type);
			goto broken_event;
		}

		if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
			show = SHOW_KERNEL;
			level = 'k';
			dso = kernel_dso;
		} else if (event->header.misc & PERF_EVENT_MISC_USER) {
			show = SHOW_USER;
			level = '.';
			map = thread__find_map(thread, ip);
			if (map != NULL) {
				dso = map->dso;
				ip -= map->start + map->pgoff;
			}
		} else {
			show = SHOW_HV;
			level = 'H';
		}

		if (show & show_mask) {
			struct symbol *sym = dso__find_symbol(dso, ip);

			if (hist_entry__add(thread, map, dso, sym, ip, level)) {
				fprintf(stderr,
					"problem incrementing symbol count, skipping event\n");
				goto broken_event;
			}
		}
		total++;
	} else switch (event->header.type) {
	case PERF_EVENT_MMAP: {
		struct thread *thread = threads__findnew(event->mmap.pid);
		struct map *map = map__new(&event->mmap, cwdp, cwdlen);

		if (dump_trace) {
			fprintf(stderr, "%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n",
				(void *)(offset + head),
				(void *)(long)(event->header.size),
				(void *)(long)event->mmap.start,
				(void *)(long)event->mmap.len,
				(void *)(long)event->mmap.pgoff,
				event->mmap.filename);
		}

		if (thread == NULL || map == NULL) {
			if (verbose)
				fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n");
			goto broken_event;
		}
		thread__insert_map(thread, map);
		total_mmap++;
		break;
	}
	case PERF_EVENT_COMM: {
		struct thread *thread = threads__findnew(event->comm.pid);

		if (dump_trace) {
			fprintf(stderr, "%p [%p]: PERF_EVENT_COMM: %s:%d\n",
				(void *)(offset + head),
				(void *)(long)(event->header.size),
				event->comm.comm, event->comm.pid);
		}

		if (thread == NULL ||
		    thread__set_comm(thread, event->comm.comm)) {
			fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n");
			goto broken_event;
		}
		total_comm++;
		break;
	}
	default: {
broken_event:
		if (dump_trace)
			fprintf(stderr, "%p [%p]: skipping unknown header type: %d\n",
				(void *)(offset + head),
				(void *)(long)(event->header.size),
				event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}
	}

	head += size;

	if (offset + head < stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	if (dump_trace) {
		fprintf(stderr, " IP events: %10ld\n", total);
		fprintf(stderr, " mmap events: %10ld\n", total_mmap);
		fprintf(stderr, " comm events: %10ld\n", total_comm);
		fprintf(stderr, " unknown events: %10ld\n", total_unknown);

		return 0;
	}

	if (verbose >= 2)
		dsos__fprintf(stdout);

	output__resort();
	output__fprintf(stdout, total);

	return rc;
}

static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_END()
};

int cmd_report(int argc, const char **argv, const char *prefix)
{
	symbol__init();

	page_size = getpagesize();

	parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	setup_pager();

	return __cmd_report();
}