builtin-report.c

/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL  1
#define SHOW_USER    2
#define SHOW_HV      4

static char const *input_name = "perf.data";
static char *vmlinux = NULL;
static char *sort_order = "comm,dso";
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int dump_trace = 0;
#define dprintf(x...)  do { if (dump_trace) printf(x); } while (0)

static int verbose;
static int full_paths;

static unsigned long page_size;
static unsigned long mmap_window = 32;

const char *perf_event_names[] = {
        [PERF_EVENT_MMAP]   = " PERF_EVENT_MMAP",
        [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP",
        [PERF_EVENT_COMM]   = " PERF_EVENT_COMM",
};

struct ip_event {
        struct perf_event_header header;
        __u64 ip;
        __u32 pid, tid;
};

struct mmap_event {
        struct perf_event_header header;
        __u32 pid, tid;
        __u64 start;
        __u64 len;
        __u64 pgoff;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        __u32 pid, tid;
        char comm[16];
};

typedef union event_union {
        struct perf_event_header header;
        struct ip_event ip;
        struct mmap_event mmap;
        struct comm_event comm;
} event_t;

static LIST_HEAD(dsos);
static struct dso *kernel_dso;

static void dsos__add(struct dso *dso)
{
        list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
        struct dso *pos;

        list_for_each_entry(pos, &dsos, node)
                if (strcmp(pos->name, name) == 0)
                        return pos;
        return NULL;
}

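/*
 * Return the cached DSO for 'name', creating it and loading its symbol
 * table on first use. Returns NULL if the DSO cannot be created.
 */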
static struct dso *dsos__findnew(const char *name)
{
        struct dso *dso = dsos__find(name);
        int nr;

        if (dso)
                return dso;

        dso = dso__new(name, 0);
        if (!dso)
                goto out_delete_dso;

        nr = dso__load(dso, NULL);
        if (nr < 0) {
                fprintf(stderr, "Failed to open: %s\n", name);
                goto out_delete_dso;
        }
        if (!nr && verbose) {
                fprintf(stderr,
                        "No symbols found in: %s, maybe install a debug package?\n",
                        name);
        }

        dsos__add(dso);

        return dso;

out_delete_dso:
        dso__delete(dso);
        return NULL;
}

static void dsos__fprintf(FILE *fp)
{
        struct dso *pos;

        list_for_each_entry(pos, &dsos, node)
                dso__fprintf(pos, fp);
}

static int load_kernel(void)
{
        int err;

        kernel_dso = dso__new("[kernel]", 0);
        if (!kernel_dso)
                return -1;

        err = dso__load_kernel(kernel_dso, vmlinux, NULL);
        if (err) {
                dso__delete(kernel_dso);
                kernel_dso = NULL;
        } else
                dsos__add(kernel_dso);

        return err;
}

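/*
 * Length of the leading prefix that 'pathname' shares with the current
 * working directory; used to shorten absolute paths to ./relative ones.
 */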
static int strcommon(const char *pathname, const char *cwd, int cwdlen)
{
        int n = 0;

        while (pathname[n] == cwd[n] && n < cwdlen)
                ++n;

        return n;
}

struct map {
        struct list_head node;
        uint64_t start;
        uint64_t end;
        uint64_t pgoff;
        struct dso *dso;
};

static struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
{
        struct map *self = malloc(sizeof(*self));

        if (self != NULL) {
                const char *filename = event->filename;
                char newfilename[PATH_MAX];

                if (cwd) {
                        int n = strcommon(filename, cwd, cwdlen);

                        if (n == cwdlen) {
                                snprintf(newfilename, sizeof(newfilename),
                                         ".%s", filename + n);
                                filename = newfilename;
                        }
                }

                self->start = event->start;
                self->end   = event->start + event->len;
                self->pgoff = event->pgoff;

                self->dso = dsos__findnew(filename);
                if (self->dso == NULL)
                        goto out_delete;
        }
        return self;
out_delete:
        free(self);
        return NULL;
}

struct thread;

struct thread {
        struct rb_node rb_node;
        struct list_head maps;
        pid_t pid;
        char *comm;
};

static struct thread *thread__new(pid_t pid)
{
        struct thread *self = malloc(sizeof(*self));

        if (self != NULL) {
                self->pid = pid;
                self->comm = malloc(32);
                if (self->comm)
                        snprintf(self->comm, 32, ":%d", self->pid);
                INIT_LIST_HEAD(&self->maps);
        }

        return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        return self->comm ? 0 : -ENOMEM;
}

static struct rb_root threads;
static struct thread *last_match;

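/*
 * Look up the thread for 'pid' in the threads rbtree, allocating and
 * inserting a new entry the first time a pid is seen.
 */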
static struct thread *threads__findnew(pid_t pid)
{
        struct rb_node **p = &threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (last_match && last_match->pid == pid)
                return last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &threads);
                last_match = th;
        }

        return th;
}

static void thread__insert_map(struct thread *self, struct map *map)
{
        list_add_tail(&map->node, &self->maps);
}

static struct map *thread__find_map(struct thread *self, uint64_t ip)
{
        struct map *pos;

        if (self == NULL)
                return NULL;

        list_for_each_entry(pos, &self->maps, node)
                if (ip >= pos->start && ip <= pos->end)
                        return pos;

        return NULL;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
        struct rb_node rb_node;

        struct thread *thread;
        struct map *map;
        struct dso *dso;
        struct symbol *sym;
        uint64_t ip;
        char level;

        uint32_t count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
        struct list_head list;

        char *header;

        int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
        int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
        size_t (*print)(FILE *fp, struct hist_entry *);
};

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
        return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
        return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
        .header = " Command: Pid ",
        .cmp    = sort__thread_cmp,
        .print  = sort__thread_print,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
        return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
        char *comm_l = left->thread->comm;
        char *comm_r = right->thread->comm;

        if (!comm_l || !comm_r) {
                if (!comm_l && !comm_r)
                        return 0;
                else if (!comm_l)
                        return -1;
                else
                        return 1;
        }

        return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
        return fprintf(fp, " %16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
        .header   = " Command",
        .cmp      = sort__comm_cmp,
        .collapse = sort__comm_collapse,
        .print    = sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct dso *dso_l = left->dso;
        struct dso *dso_r = right->dso;

        if (!dso_l || !dso_r) {
                if (!dso_l && !dso_r)
                        return 0;
                else if (!dso_l)
                        return -1;
                else
                        return 1;
        }

        return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
        if (self->dso)
                return fprintf(fp, " %-25s", self->dso->name);

        return fprintf(fp, " %016llx", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
        .header = " Shared Object ",
        .cmp    = sort__dso_cmp,
        .print  = sort__dso_print,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
        uint64_t ip_l, ip_r;

        if (left->sym == right->sym)
                return 0;

        ip_l = left->sym ? left->sym->start : left->ip;
        ip_r = right->sym ? right->sym->start : right->ip;

        return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
        size_t ret = 0;

        if (verbose)
                ret += fprintf(fp, " %#018llx", (__u64)self->ip);

        if (self->dso)
                ret += fprintf(fp, " %s: ", self->dso->name);
        else
                ret += fprintf(fp, " %#016llx: ", (__u64)self->ip);

        if (self->sym)
                ret += fprintf(fp, "%s", self->sym->name);
        else
                ret += fprintf(fp, "%#016llx", (__u64)self->ip);

        return ret;
}

static struct sort_entry sort_sym = {
        .header = " Shared Object: Symbol",
        .cmp    = sort__sym_cmp,
        .print  = sort__sym_print,
};

static int sort__need_collapse = 0;

struct sort_dimension {
        char *name;
        struct sort_entry *entry;
        int taken;
};

static struct sort_dimension sort_dimensions[] = {
        { .name = "pid",    .entry = &sort_thread, },
        { .name = "comm",   .entry = &sort_comm,   },
        { .name = "dso",    .entry = &sort_dso,    },
        { .name = "symbol", .entry = &sort_sym,    },
};

static LIST_HEAD(hist_entry__sort_list);

static int sort_dimension__add(char *tok)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
                struct sort_dimension *sd = &sort_dimensions[i];

                if (sd->taken)
                        continue;

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sd->entry->collapse)
                        sort__need_collapse = 1;

                list_add_tail(&sd->entry->list, &hist_entry__sort_list);
                sd->taken = 1;

                return 0;
        }

        return -ESRCH;
}

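/*
 * Compare two histogram entries by walking the configured sort keys in
 * order; the first key that differs decides. The collapse variant prefers
 * each key's ->collapse method (e.g. comparing comm strings rather than
 * pids) so that equivalent entries can be merged.
 */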
static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->collapse ?: se->cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
{
        struct sort_entry *se;
        size_t ret;

        if (total_samples) {
                ret = fprintf(fp, " %6.2f%%",
                              (self->count * 100.0) / total_samples);
        } else
                ret = fprintf(fp, "%12d ", self->count);

        list_for_each_entry(se, &hist_entry__sort_list, list)
                ret += se->print(fp, self);

        ret += fprintf(fp, "\n");

        return ret;
}

/*
 * collect histogram counts
 */
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
                struct symbol *sym, uint64_t ip, char level)
{
        struct rb_node **p = &hist.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct hist_entry entry = {
                .thread = thread,
                .map    = map,
                .dso    = dso,
                .sym    = sym,
                .ip     = ip,
                .level  = level,
                .count  = 1,
        };
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__cmp(&entry, he);

                if (!cmp) {
                        he->count++;
                        return 0;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = malloc(sizeof(*he));
        if (!he)
                return -ENOMEM;
        *he = entry;
        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &hist);

        return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
        struct rb_node **p = &collapse_hists.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->count += he->count;
                        hist_entry__free(he);
                        return;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        next = rb_first(&hist);
        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, &hist);
                collapse__insert_entry(n);
        }
}

/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
        struct rb_node **p = &output_hists.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->count > iter->count)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
        struct rb_node *next;
        struct hist_entry *n;
        struct rb_root *tree = &hist;

        /* erase from whichever tree the entries currently live in */
        if (sort__need_collapse)
                tree = &collapse_hists;

        next = rb_first(tree);
        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, tree);
                output__insert_entry(n);
        }
}

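/*
 * Print the column headers for the active sort keys, a matching dotted
 * underline, then every entry of the output tree with its overhead.
 */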
static size_t output__fprintf(FILE *fp, uint64_t total_samples)
{
        struct hist_entry *pos;
        struct sort_entry *se;
        struct rb_node *nd;
        size_t ret = 0;

        fprintf(fp, "#\n");
        fprintf(fp, "# Overhead");
        list_for_each_entry(se, &hist_entry__sort_list, list)
                fprintf(fp, " %s", se->header);
        fprintf(fp, "\n");

        fprintf(fp, "# ........");
        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int i;

                fprintf(fp, " ");
                for (i = 0; i < strlen(se->header)-1; i++)
                        fprintf(fp, ".");
        }
        fprintf(fp, "\n");

        fprintf(fp, "#\n");

        for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node);
                ret += hist_entry__fprintf(fp, pos, total_samples);
        }

        return ret;
}

static void register_idle_thread(void)
{
        struct thread *thread = threads__findnew(0);

        if (thread == NULL ||
            thread__set_comm(thread, "[idle]")) {
                fprintf(stderr, "problem inserting idle task.\n");
                exit(-1);
        }
}

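/*
 * Main report loop: mmap() perf.data one window at a time, walk the event
 * records in it, feed IP samples into the histogram and mmap/comm events
 * into the per-thread tables, then resort and print the result.
 */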
static int __cmd_report(void)
{
        unsigned long offset = 0;
        unsigned long head = 0;
        struct stat stat;
        char *buf;
        event_t *event;
        int ret, rc = EXIT_FAILURE;
        uint32_t size;
        unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0;
        char cwd[PATH_MAX], *cwdp = cwd;
        int cwdlen;

        register_idle_thread();

        input = open(input_name, O_RDONLY);
        if (input < 0) {
                perror("failed to open file");
                exit(-1);
        }

        ret = fstat(input, &stat);
        if (ret < 0) {
                perror("failed to stat file");
                exit(-1);
        }

        if (!stat.st_size) {
                fprintf(stderr, "zero-sized file, nothing to do!\n");
                exit(0);
        }

        if (load_kernel() < 0) {
                perror("failed to load kernel symbols");
                return EXIT_FAILURE;
        }

        if (!full_paths) {
                if (getcwd(cwd, sizeof(cwd)) == NULL) {
                        perror("failed to get the current directory");
                        return EXIT_FAILURE;
                }
                cwdlen = strlen(cwd);
        } else {
                cwdp = NULL;
                cwdlen = 0;
        }

remap:
        buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
                           MAP_SHARED, input, offset);
        if (buf == MAP_FAILED) {
                perror("failed to mmap file");
                exit(-1);
        }

more:
        event = (event_t *)(buf + head);

        size = event->header.size;
        if (!size)
                size = 8;

        if (head + event->header.size >= page_size * mmap_window) {
                unsigned long shift = page_size * (head / page_size);
                int ret;

                ret = munmap(buf, page_size * mmap_window);
                assert(ret == 0);

                offset += shift;
                head -= shift;
                goto remap;
        }

        size = event->header.size;
        if (!size)
                goto broken_event;

        if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
                char level;
                int show = 0;
                struct dso *dso = NULL;
                struct thread *thread = threads__findnew(event->ip.pid);
                uint64_t ip = event->ip.ip;
                struct map *map = NULL;

                dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
                        (void *)(offset + head),
                        (void *)(long)(event->header.size),
                        event->header.misc,
                        event->ip.pid,
                        (void *)(long)ip);

                if (thread == NULL) {
                        fprintf(stderr, "problem processing %d event, skipping it.\n",
                                event->header.type);
                        goto broken_event;
                }

                dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

                if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
                        show = SHOW_KERNEL;
                        level = 'k';
                        dso = kernel_dso;
                        dprintf(" ...... dso: %s\n", dso->name);

                } else if (event->header.misc & PERF_EVENT_MISC_USER) {

                        show = SHOW_USER;
                        level = '.';
                        map = thread__find_map(thread, ip);
                        if (map != NULL) {
                                dso = map->dso;
                                ip -= map->start + map->pgoff;
                        } else {
                                /*
                                 * If this is outside of all known maps,
                                 * and is a negative address, try to look it
                                 * up in the kernel dso, as it might be a
                                 * vsyscall (which executes in user-mode):
                                 */
                                if ((long long)ip < 0)
                                        dso = kernel_dso;
                        }
                        dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

                } else {
                        show = SHOW_HV;
                        level = 'H';
                        dprintf(" ...... dso: [hypervisor]\n");
                }

                if (show & show_mask) {
                        struct symbol *sym = dso__find_symbol(dso, ip);

                        if (hist_entry__add(thread, map, dso, sym, ip, level)) {
                                fprintf(stderr,
                                        "problem incrementing symbol count, skipping event\n");
                                goto broken_event;
                        }
                }

                total++;
        } else switch (event->header.type) {
        case PERF_EVENT_MMAP: {
                struct thread *thread = threads__findnew(event->mmap.pid);
                struct map *map = map__new(&event->mmap, cwdp, cwdlen);

                dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n",
                        (void *)(offset + head),
                        (void *)(long)(event->header.size),
                        (void *)(long)event->mmap.start,
                        (void *)(long)event->mmap.len,
                        (void *)(long)event->mmap.pgoff,
                        event->mmap.filename);

                if (thread == NULL || map == NULL) {
                        if (verbose)
                                fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n");
                        goto broken_event;
                }
                thread__insert_map(thread, map);
                total_mmap++;
                break;
        }
        case PERF_EVENT_COMM: {
                struct thread *thread = threads__findnew(event->comm.pid);

                dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
                        (void *)(offset + head),
                        (void *)(long)(event->header.size),
                        event->comm.comm, event->comm.pid);

                if (thread == NULL ||
                    thread__set_comm(thread, event->comm.comm)) {
                        fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n");
                        goto broken_event;
                }
                total_comm++;
                break;
        }
        default: {
broken_event:
                dprintf("%p [%p]: skipping unknown header type: %d\n",
                        (void *)(offset + head),
                        (void *)(long)(event->header.size),
                        event->header.type);

                total_unknown++;

                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope to catch on again 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }
        }

        head += size;

        if (offset + head < stat.st_size)
                goto more;

        rc = EXIT_SUCCESS;
        close(input);

        dprintf(" IP events: %10ld\n", total);
        dprintf(" mmap events: %10ld\n", total_mmap);
        dprintf(" comm events: %10ld\n", total_comm);
        dprintf(" unknown events: %10ld\n", total_unknown);

        if (dump_trace)
                return 0;

        if (verbose >= 2)
                dsos__fprintf(stdout);

        collapse__resort();
        output__resort();
        output__fprintf(stdout, total);

        return rc;
}

static const char * const report_usage[] = {
        "perf report [<options>] <command>",
        NULL
};

static const struct option options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                   "input file name"),
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
                   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
        OPT_BOOLEAN('P', "full-paths", &full_paths,
                    "Don't shorten the pathnames taking into account the cwd"),
        OPT_END()
};

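/*
 * Split the --sort argument on ',' (and spaces) and register each
 * requested key with the sort-entry list, in the order given.
 */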
static void setup_sorting(void)
{
        char *tmp, *tok, *str = strdup(sort_order);

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok) < 0) {
                        error("Unknown --sort key: `%s'", tok);
                        usage_with_options(report_usage, options);
                }
        }

        free(str);
}

int cmd_report(int argc, const char **argv, const char *prefix)
{
        symbol__init();

        page_size = getpagesize();

        parse_options(argc, argv, options, report_usage, 0);

        setup_sorting();

        setup_pager();

        return __cmd_report();
}