builtin-report.c

  1. /*
  2. * builtin-report.c
  3. *
  4. * Builtin report command: Analyze the perf.data input file,
  5. * look up and read DSOs and symbol information and display
  6. * a histogram of results, sorted along various keys.
  7. */
  8. #include "builtin.h"
  9. #include "util/util.h"
  10. #include "util/color.h"
  11. #include <linux/list.h>
  12. #include "util/cache.h"
  13. #include <linux/rbtree.h>
  14. #include "util/symbol.h"
  15. #include "util/string.h"
  16. #include "util/callchain.h"
  17. #include "util/strlist.h"
  18. #include "perf.h"
  19. #include "util/header.h"
  20. #include "util/parse-options.h"
  21. #include "util/parse-events.h"
  22. #define SHOW_KERNEL 1
  23. #define SHOW_USER 2
  24. #define SHOW_HV 4
  25. static char const *input_name = "perf.data";
  26. static char *vmlinux = NULL;
  27. static char default_sort_order[] = "comm,dso,symbol";
  28. static char *sort_order = default_sort_order;
  29. static char *dso_list_str, *comm_list_str, *sym_list_str,
  30. *col_width_list_str;
  31. static struct strlist *dso_list, *comm_list, *sym_list;
  32. static char *field_sep;
  33. static int input;
  34. static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
  35. static int dump_trace = 0;
  36. #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
  37. #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
  38. static int verbose;
  39. #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
  40. static int modules;
  41. static int full_paths;
  42. static int show_nr_samples;
  43. static unsigned long page_size;
  44. static unsigned long mmap_window = 32;
  45. static char default_parent_pattern[] = "^sys_|^do_page_fault";
  46. static char *parent_pattern = default_parent_pattern;
  47. static regex_t parent_regex;
  48. static int exclude_other = 1;
  49. static char callchain_default_opt[] = "fractal,0.5";
  50. static int callchain;
  51. static
  52. struct callchain_param callchain_param = {
  53. .mode = CHAIN_GRAPH_ABS,
  54. .min_percent = 0.5
  55. };
  56. static u64 sample_type;
  57. struct ip_event {
  58. struct perf_event_header header;
  59. u64 ip;
  60. u32 pid, tid;
  61. unsigned char __more_data[];
  62. };
  63. struct mmap_event {
  64. struct perf_event_header header;
  65. u32 pid, tid;
  66. u64 start;
  67. u64 len;
  68. u64 pgoff;
  69. char filename[PATH_MAX];
  70. };
  71. struct comm_event {
  72. struct perf_event_header header;
  73. u32 pid, tid;
  74. char comm[16];
  75. };
  76. struct fork_event {
  77. struct perf_event_header header;
  78. u32 pid, ppid;
  79. u32 tid, ptid;
  80. };
  81. struct lost_event {
  82. struct perf_event_header header;
  83. u64 id;
  84. u64 lost;
  85. };
  86. struct read_event {
  87. struct perf_event_header header;
  88. u32 pid,tid;
  89. u64 value;
  90. u64 format[3];
  91. };
  92. typedef union event_union {
  93. struct perf_event_header header;
  94. struct ip_event ip;
  95. struct mmap_event mmap;
  96. struct comm_event comm;
  97. struct fork_event fork;
  98. struct lost_event lost;
  99. struct read_event read;
  100. } event_t;
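/*
* Print helper used by the sort-key ->print methods: when --field-separator
* is in use, any occurrence of the separator character in the formatted
* output is replaced with '.' so the column separator stays unambiguous.
*/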
  101. static int repsep_fprintf(FILE *fp, const char *fmt, ...)
  102. {
  103. int n;
  104. va_list ap;
  105. va_start(ap, fmt);
  106. if (!field_sep)
  107. n = vfprintf(fp, fmt, ap);
  108. else {
  109. char *bf = NULL;
  110. n = vasprintf(&bf, fmt, ap);
  111. if (n > 0) {
  112. char *sep = bf;
  113. while (1) {
  114. sep = strchr(sep, *field_sep);
  115. if (sep == NULL)
  116. break;
  117. *sep = '.';
  118. }
  119. }
  120. fputs(bf, fp);
  121. free(bf);
  122. }
  123. va_end(ap);
  124. return n;
  125. }
  126. static LIST_HEAD(dsos);
  127. static struct dso *kernel_dso;
  128. static struct dso *vdso;
  129. static struct dso *hypervisor_dso;
  130. static void dsos__add(struct dso *dso)
  131. {
  132. list_add_tail(&dso->node, &dsos);
  133. }
  134. static struct dso *dsos__find(const char *name)
  135. {
  136. struct dso *pos;
  137. list_for_each_entry(pos, &dsos, node)
  138. if (strcmp(pos->name, name) == 0)
  139. return pos;
  140. return NULL;
  141. }
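/*
* Find a DSO by name, creating it and loading its symbols on first use:
*/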
  142. static struct dso *dsos__findnew(const char *name)
  143. {
  144. struct dso *dso = dsos__find(name);
  145. int nr;
  146. if (dso)
  147. return dso;
  148. dso = dso__new(name, 0);
  149. if (!dso)
  150. goto out_delete_dso;
  151. nr = dso__load(dso, NULL, verbose);
  152. if (nr < 0) {
  153. eprintf("Failed to open: %s\n", name);
  154. goto out_delete_dso;
  155. }
  156. if (!nr)
  157. eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
  158. dsos__add(dso);
  159. return dso;
  160. out_delete_dso:
  161. dso__delete(dso);
  162. return NULL;
  163. }
  164. static void dsos__fprintf(FILE *fp)
  165. {
  166. struct dso *pos;
  167. list_for_each_entry(pos, &dsos, node)
  168. dso__fprintf(pos, fp);
  169. }
  170. static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
  171. {
  172. return dso__find_symbol(dso, ip);
  173. }
  174. static int load_kernel(void)
  175. {
  176. int err;
  177. kernel_dso = dso__new("[kernel]", 0);
  178. if (!kernel_dso)
  179. return -1;
  180. err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
  181. if (err <= 0) {
  182. dso__delete(kernel_dso);
  183. kernel_dso = NULL;
  184. } else
  185. dsos__add(kernel_dso);
  186. vdso = dso__new("[vdso]", 0);
  187. if (!vdso)
  188. return -1;
  189. vdso->find_symbol = vdso__find_symbol;
  190. dsos__add(vdso);
  191. hypervisor_dso = dso__new("[hypervisor]", 0);
  192. if (!hypervisor_dso)
  193. return -1;
  194. dsos__add(hypervisor_dso);
  195. return err;
  196. }
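/*
* Unless --full-paths is given, map file names under the current working
* directory are printed relative to it; strcommon() returns the length of
* the prefix a pathname shares with that cwd.
*/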
  197. static char __cwd[PATH_MAX];
  198. static char *cwd = __cwd;
  199. static int cwdlen;
  200. static int strcommon(const char *pathname)
  201. {
  202. int n = 0;
  203. while (n < cwdlen && pathname[n] == cwd[n])
  204. ++n;
  205. return n;
  206. }
  207. struct map {
  208. struct list_head node;
  209. u64 start;
  210. u64 end;
  211. u64 pgoff;
  212. u64 (*map_ip)(struct map *, u64);
  213. struct dso *dso;
  214. };
  215. static u64 map__map_ip(struct map *map, u64 ip)
  216. {
  217. return ip - map->start + map->pgoff;
  218. }
  219. static u64 vdso__map_ip(struct map *map __used, u64 ip)
  220. {
  221. return ip;
  222. }
  223. static inline int is_anon_memory(const char *filename)
  224. {
  225. return strcmp(filename, "//anon") == 0;
  226. }
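/*
* Build a map from a PERF_EVENT_MMAP record: shorten the file name relative
* to the cwd, redirect anonymous mappings to the per-pid /tmp/perf-<pid>.map
* file, and use the identity map_ip() for the vdso and anonymous regions so
* their addresses are looked up unmodified.
*/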
  227. static struct map *map__new(struct mmap_event *event)
  228. {
  229. struct map *self = malloc(sizeof(*self));
  230. if (self != NULL) {
  231. const char *filename = event->filename;
  232. char newfilename[PATH_MAX];
  233. int anon;
  234. if (cwd) {
  235. int n = strcommon(filename);
  236. if (n == cwdlen) {
  237. snprintf(newfilename, sizeof(newfilename),
  238. ".%s", filename + n);
  239. filename = newfilename;
  240. }
  241. }
  242. anon = is_anon_memory(filename);
  243. if (anon) {
  244. snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
  245. filename = newfilename;
  246. }
  247. self->start = event->start;
  248. self->end = event->start + event->len;
  249. self->pgoff = event->pgoff;
  250. self->dso = dsos__findnew(filename);
  251. if (self->dso == NULL)
  252. goto out_delete;
  253. if (self->dso == vdso || anon)
  254. self->map_ip = vdso__map_ip;
  255. else
  256. self->map_ip = map__map_ip;
  257. }
  258. return self;
  259. out_delete:
  260. free(self);
  261. return NULL;
  262. }
  263. static struct map *map__clone(struct map *self)
  264. {
  265. struct map *map = malloc(sizeof(*self));
  266. if (!map)
  267. return NULL;
  268. memcpy(map, self, sizeof(*self));
  269. return map;
  270. }
  271. static int map__overlap(struct map *l, struct map *r)
  272. {
  273. if (l->start > r->start) {
  274. struct map *t = l;
  275. l = r;
  276. r = t;
  277. }
  278. if (l->end > r->start)
  279. return 1;
  280. return 0;
  281. }
  282. static size_t map__fprintf(struct map *self, FILE *fp)
  283. {
  284. return fprintf(fp, " %Lx-%Lx %Lx %s\n",
  285. self->start, self->end, self->pgoff, self->dso->name);
  286. }
  287. struct thread {
  288. struct rb_node rb_node;
  289. struct list_head maps;
  290. pid_t pid;
  291. char *comm;
  292. };
  293. static struct thread *thread__new(pid_t pid)
  294. {
  295. struct thread *self = malloc(sizeof(*self));
  296. if (self != NULL) {
  297. self->pid = pid;
  298. self->comm = malloc(32);
  299. if (self->comm)
  300. snprintf(self->comm, 32, ":%d", self->pid);
  301. INIT_LIST_HEAD(&self->maps);
  302. }
  303. return self;
  304. }
  305. static unsigned int dsos__col_width,
  306. comms__col_width,
  307. threads__col_width;
  308. static int thread__set_comm(struct thread *self, const char *comm)
  309. {
  310. if (self->comm)
  311. free(self->comm);
  312. self->comm = strdup(comm);
  313. if (!self->comm)
  314. return -ENOMEM;
  315. if (!col_width_list_str && !field_sep &&
  316. (!comm_list || strlist__has_entry(comm_list, comm))) {
  317. unsigned int slen = strlen(comm);
  318. if (slen > comms__col_width) {
  319. comms__col_width = slen;
  320. threads__col_width = slen + 6;
  321. }
  322. }
  323. return 0;
  324. }
  325. static size_t thread__fprintf(struct thread *self, FILE *fp)
  326. {
  327. struct map *pos;
  328. size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
  329. list_for_each_entry(pos, &self->maps, node)
  330. ret += map__fprintf(pos, fp);
  331. return ret;
  332. }
  333. static struct rb_root threads;
  334. static struct thread *last_match;
  335. static struct thread *threads__findnew(pid_t pid)
  336. {
  337. struct rb_node **p = &threads.rb_node;
  338. struct rb_node *parent = NULL;
  339. struct thread *th;
  340. /*
  341. * Front-end cache - PID lookups come in blocks,
  342. * so most of the time we don't have to look up
  343. * the full rbtree:
  344. */
  345. if (last_match && last_match->pid == pid)
  346. return last_match;
  347. while (*p != NULL) {
  348. parent = *p;
  349. th = rb_entry(parent, struct thread, rb_node);
  350. if (th->pid == pid) {
  351. last_match = th;
  352. return th;
  353. }
  354. if (pid < th->pid)
  355. p = &(*p)->rb_left;
  356. else
  357. p = &(*p)->rb_right;
  358. }
  359. th = thread__new(pid);
  360. if (th != NULL) {
  361. rb_link_node(&th->rb_node, parent, p);
  362. rb_insert_color(&th->rb_node, &threads);
  363. last_match = th;
  364. }
  365. return th;
  366. }
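/*
* Insert a new map into a thread, trimming or dropping any existing maps it
* overlaps so that address lookups stay unambiguous:
*/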
  367. static void thread__insert_map(struct thread *self, struct map *map)
  368. {
  369. struct map *pos, *tmp;
  370. list_for_each_entry_safe(pos, tmp, &self->maps, node) {
  371. if (map__overlap(pos, map)) {
  372. if (verbose >= 2) {
  373. printf("overlapping maps:\n");
  374. map__fprintf(map, stdout);
  375. map__fprintf(pos, stdout);
  376. }
  377. if (map->start <= pos->start && map->end > pos->start)
  378. pos->start = map->end;
  379. if (map->end >= pos->end && map->start < pos->end)
  380. pos->end = map->start;
  381. if (verbose >= 2) {
  382. printf("after collision:\n");
  383. map__fprintf(pos, stdout);
  384. }
  385. if (pos->start >= pos->end) {
  386. list_del_init(&pos->node);
  387. free(pos);
  388. }
  389. }
  390. }
  391. list_add_tail(&map->node, &self->maps);
  392. }
  393. static int thread__fork(struct thread *self, struct thread *parent)
  394. {
  395. struct map *map;
  396. if (self->comm)
  397. free(self->comm);
  398. self->comm = strdup(parent->comm);
  399. if (!self->comm)
  400. return -ENOMEM;
  401. list_for_each_entry(map, &parent->maps, node) {
  402. struct map *new = map__clone(map);
  403. if (!new)
  404. return -ENOMEM;
  405. thread__insert_map(self, new);
  406. }
  407. return 0;
  408. }
  409. static struct map *thread__find_map(struct thread *self, u64 ip)
  410. {
  411. struct map *pos;
  412. if (self == NULL)
  413. return NULL;
  414. list_for_each_entry(pos, &self->maps, node)
  415. if (ip >= pos->start && ip <= pos->end)
  416. return pos;
  417. return NULL;
  418. }
  419. static size_t threads__fprintf(FILE *fp)
  420. {
  421. size_t ret = 0;
  422. struct rb_node *nd;
  423. for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
  424. struct thread *pos = rb_entry(nd, struct thread, rb_node);
  425. ret += thread__fprintf(pos, fp);
  426. }
  427. return ret;
  428. }
  429. /*
  430. * histogram, sorted on item, collects counts
  431. */
  432. static struct rb_root hist;
  433. struct hist_entry {
  434. struct rb_node rb_node;
  435. struct thread *thread;
  436. struct map *map;
  437. struct dso *dso;
  438. struct symbol *sym;
  439. struct symbol *parent;
  440. u64 ip;
  441. char level;
  442. struct callchain_node callchain;
  443. struct rb_root sorted_chain;
  444. u64 count;
  445. };
  446. /*
  447. * configurable sorting bits
  448. */
  449. struct sort_entry {
  450. struct list_head list;
  451. char *header;
  452. int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
  453. int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
  454. size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
  455. unsigned int *width;
  456. bool elide;
  457. };
  458. static int64_t cmp_null(void *l, void *r)
  459. {
  460. if (!l && !r)
  461. return 0;
  462. else if (!l)
  463. return -1;
  464. else
  465. return 1;
  466. }
  467. /* --sort pid */
  468. static int64_t
  469. sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
  470. {
  471. return right->thread->pid - left->thread->pid;
  472. }
  473. static size_t
  474. sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
  475. {
  476. return repsep_fprintf(fp, "%*s:%5d", width - 6,
  477. self->thread->comm ?: "", self->thread->pid);
  478. }
  479. static struct sort_entry sort_thread = {
  480. .header = "Command: Pid",
  481. .cmp = sort__thread_cmp,
  482. .print = sort__thread_print,
  483. .width = &threads__col_width,
  484. };
  485. /* --sort comm */
  486. static int64_t
  487. sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
  488. {
  489. return right->thread->pid - left->thread->pid;
  490. }
  491. static int64_t
  492. sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
  493. {
  494. char *comm_l = left->thread->comm;
  495. char *comm_r = right->thread->comm;
  496. if (!comm_l || !comm_r)
  497. return cmp_null(comm_l, comm_r);
  498. return strcmp(comm_l, comm_r);
  499. }
  500. static size_t
  501. sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
  502. {
  503. return repsep_fprintf(fp, "%*s", width, self->thread->comm);
  504. }
  505. static struct sort_entry sort_comm = {
  506. .header = "Command",
  507. .cmp = sort__comm_cmp,
  508. .collapse = sort__comm_collapse,
  509. .print = sort__comm_print,
  510. .width = &comms__col_width,
  511. };
  512. /* --sort dso */
  513. static int64_t
  514. sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
  515. {
  516. struct dso *dso_l = left->dso;
  517. struct dso *dso_r = right->dso;
  518. if (!dso_l || !dso_r)
  519. return cmp_null(dso_l, dso_r);
  520. return strcmp(dso_l->name, dso_r->name);
  521. }
  522. static size_t
  523. sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
  524. {
  525. if (self->dso)
  526. return repsep_fprintf(fp, "%-*s", width, self->dso->name);
  527. return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
  528. }
  529. static struct sort_entry sort_dso = {
  530. .header = "Shared Object",
  531. .cmp = sort__dso_cmp,
  532. .print = sort__dso_print,
  533. .width = &dsos__col_width,
  534. };
  535. /* --sort symbol */
  536. static int64_t
  537. sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
  538. {
  539. u64 ip_l, ip_r;
  540. if (left->sym == right->sym)
  541. return 0;
  542. ip_l = left->sym ? left->sym->start : left->ip;
  543. ip_r = right->sym ? right->sym->start : right->ip;
  544. return (int64_t)(ip_r - ip_l);
  545. }
  546. static size_t
  547. sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
  548. {
  549. size_t ret = 0;
  550. if (verbose)
  551. ret += repsep_fprintf(fp, "%#018llx ", (u64)self->ip);
  552. ret += repsep_fprintf(fp, "[%c] ", self->level);
  553. if (self->sym) {
  554. ret += repsep_fprintf(fp, "%s", self->sym->name);
  555. if (self->sym->module)
  556. ret += repsep_fprintf(fp, "\t[%s]",
  557. self->sym->module->name);
  558. } else {
  559. ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
  560. }
  561. return ret;
  562. }
  563. static struct sort_entry sort_sym = {
  564. .header = "Symbol",
  565. .cmp = sort__sym_cmp,
  566. .print = sort__sym_print,
  567. };
  568. /* --sort parent */
  569. static int64_t
  570. sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
  571. {
  572. struct symbol *sym_l = left->parent;
  573. struct symbol *sym_r = right->parent;
  574. if (!sym_l || !sym_r)
  575. return cmp_null(sym_l, sym_r);
  576. return strcmp(sym_l->name, sym_r->name);
  577. }
  578. static size_t
  579. sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
  580. {
  581. return repsep_fprintf(fp, "%-*s", width,
  582. self->parent ? self->parent->name : "[other]");
  583. }
  584. static unsigned int parent_symbol__col_width;
  585. static struct sort_entry sort_parent = {
  586. .header = "Parent symbol",
  587. .cmp = sort__parent_cmp,
  588. .print = sort__parent_print,
  589. .width = &parent_symbol__col_width,
  590. };
  591. static int sort__need_collapse = 0;
  592. static int sort__has_parent = 0;
  593. struct sort_dimension {
  594. char *name;
  595. struct sort_entry *entry;
  596. int taken;
  597. };
  598. static struct sort_dimension sort_dimensions[] = {
  599. { .name = "pid", .entry = &sort_thread, },
  600. { .name = "comm", .entry = &sort_comm, },
  601. { .name = "dso", .entry = &sort_dso, },
  602. { .name = "symbol", .entry = &sort_sym, },
  603. { .name = "parent", .entry = &sort_parent, },
  604. };
  605. static LIST_HEAD(hist_entry__sort_list);
  606. static int sort_dimension__add(char *tok)
  607. {
  608. unsigned int i;
  609. for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
  610. struct sort_dimension *sd = &sort_dimensions[i];
  611. if (sd->taken)
  612. continue;
  613. if (strncasecmp(tok, sd->name, strlen(tok)))
  614. continue;
  615. if (sd->entry->collapse)
  616. sort__need_collapse = 1;
  617. if (sd->entry == &sort_parent) {
  618. int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
  619. if (ret) {
  620. char err[BUFSIZ];
  621. regerror(ret, &parent_regex, err, sizeof(err));
  622. fprintf(stderr, "Invalid regex: %s\n%s",
  623. parent_pattern, err);
  624. exit(-1);
  625. }
  626. sort__has_parent = 1;
  627. }
  628. list_add_tail(&sd->entry->list, &hist_entry__sort_list);
  629. sd->taken = 1;
  630. return 0;
  631. }
  632. return -ESRCH;
  633. }
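/*
* Compare two hist entries with the selected sort keys, in the order they
* were added to hist_entry__sort_list:
*/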
  634. static int64_t
  635. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  636. {
  637. struct sort_entry *se;
  638. int64_t cmp = 0;
  639. list_for_each_entry(se, &hist_entry__sort_list, list) {
  640. cmp = se->cmp(left, right);
  641. if (cmp)
  642. break;
  643. }
  644. return cmp;
  645. }
  646. static int64_t
  647. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  648. {
  649. struct sort_entry *se;
  650. int64_t cmp = 0;
  651. list_for_each_entry(se, &hist_entry__sort_list, list) {
  652. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  653. f = se->collapse ?: se->cmp;
  654. cmp = f(left, right);
  655. if (cmp)
  656. break;
  657. }
  658. return cmp;
  659. }
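/*
* Graph-mode callchain output: a '|' is printed for every depth level that
* still has pending siblings, and the first line of each branch shows its
* share of hits, relative to the parent in fractal mode or to the total
* sample count in absolute graph mode.
*/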
  660. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
  661. {
  662. int i;
  663. size_t ret = 0;
  664. ret += fprintf(fp, "%s", " ");
  665. for (i = 0; i < depth; i++)
  666. if (depth_mask & (1 << i))
  667. ret += fprintf(fp, "| ");
  668. else
  669. ret += fprintf(fp, " ");
  670. ret += fprintf(fp, "\n");
  671. return ret;
  672. }
  673. static size_t
  674. ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
  675. int depth_mask, int count, u64 total_samples,
  676. int hits)
  677. {
  678. int i;
  679. size_t ret = 0;
  680. ret += fprintf(fp, "%s", " ");
  681. for (i = 0; i < depth; i++) {
  682. if (depth_mask & (1 << i))
  683. ret += fprintf(fp, "|");
  684. else
  685. ret += fprintf(fp, " ");
  686. if (!count && i == depth - 1) {
  687. double percent;
  688. percent = hits * 100.0 / total_samples;
  689. ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
  690. } else
  691. ret += fprintf(fp, "%s", " ");
  692. }
  693. if (chain->sym)
  694. ret += fprintf(fp, "%s\n", chain->sym->name);
  695. else
  696. ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
  697. return ret;
  698. }
  699. static size_t
  700. callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
  701. u64 total_samples, int depth, int depth_mask)
  702. {
  703. struct rb_node *node, *next;
  704. struct callchain_node *child;
  705. struct callchain_list *chain;
  706. int new_depth_mask = depth_mask;
  707. u64 new_total;
  708. size_t ret = 0;
  709. int i;
  710. if (callchain_param.mode == CHAIN_GRAPH_REL)
  711. new_total = self->cumul_hit;
  712. else
  713. new_total = total_samples;
  714. node = rb_first(&self->rb_root);
  715. while (node) {
  716. child = rb_entry(node, struct callchain_node, rb_node);
  717. /*
  718. * The depth mask manages the output of pipes that show
  719. * the depth. We don't want to keep the pipes of the current
  720. * level for the last child of this depth
  721. */
  722. next = rb_next(node);
  723. if (!next)
  724. new_depth_mask &= ~(1 << (depth - 1));
  725. /*
  726. * But we keep the older depth mask for the line separator
  727. * to keep the level link until we reach the last child
  728. */
  729. ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
  730. i = 0;
  731. list_for_each_entry(chain, &child->val, list) {
  732. if (chain->ip >= PERF_CONTEXT_MAX)
  733. continue;
  734. ret += ipchain__fprintf_graph(fp, chain, depth,
  735. new_depth_mask, i++,
  736. new_total,
  737. child->cumul_hit);
  738. }
  739. ret += callchain__fprintf_graph(fp, child, new_total,
  740. depth + 1,
  741. new_depth_mask | (1 << depth));
  742. node = next;
  743. }
  744. return ret;
  745. }
  746. static size_t
  747. callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
  748. u64 total_samples)
  749. {
  750. struct callchain_list *chain;
  751. size_t ret = 0;
  752. if (!self)
  753. return 0;
  754. ret += callchain__fprintf_flat(fp, self->parent, total_samples);
  755. list_for_each_entry(chain, &self->val, list) {
  756. if (chain->ip >= PERF_CONTEXT_MAX)
  757. continue;
  758. if (chain->sym)
  759. ret += fprintf(fp, " %s\n", chain->sym->name);
  760. else
  761. ret += fprintf(fp, " %p\n",
  762. (void *)(long)chain->ip);
  763. }
  764. return ret;
  765. }
  766. static size_t
  767. hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
  768. u64 total_samples)
  769. {
  770. struct rb_node *rb_node;
  771. struct callchain_node *chain;
  772. size_t ret = 0;
  773. rb_node = rb_first(&self->sorted_chain);
  774. while (rb_node) {
  775. double percent;
  776. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  777. percent = chain->hit * 100.0 / total_samples;
  778. switch (callchain_param.mode) {
  779. case CHAIN_FLAT:
  780. ret += percent_color_fprintf(fp, " %6.2f%%\n",
  781. percent);
  782. ret += callchain__fprintf_flat(fp, chain, total_samples);
  783. break;
  784. case CHAIN_GRAPH_ABS: /* Fall through */
  785. case CHAIN_GRAPH_REL:
  786. ret += callchain__fprintf_graph(fp, chain,
  787. total_samples, 1, 1);
  788. default:
  789. break;
  790. }
  791. ret += fprintf(fp, "\n");
  792. rb_node = rb_next(rb_node);
  793. }
  794. return ret;
  795. }
  796. static size_t
  797. hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
  798. {
  799. struct sort_entry *se;
  800. size_t ret;
  801. if (exclude_other && !self->parent)
  802. return 0;
  803. if (total_samples)
  804. ret = percent_color_fprintf(fp,
  805. field_sep ? "%.2f" : " %6.2f%%",
  806. (self->count * 100.0) / total_samples);
  807. else
  808. ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
  809. if (show_nr_samples) {
  810. if (field_sep)
  811. fprintf(fp, "%c%lld", *field_sep, self->count);
  812. else
  813. fprintf(fp, "%11lld", self->count);
  814. }
  815. list_for_each_entry(se, &hist_entry__sort_list, list) {
  816. if (se->elide)
  817. continue;
  818. fprintf(fp, "%s", field_sep ?: " ");
  819. ret += se->print(fp, self, se->width ? *se->width : 0);
  820. }
  821. ret += fprintf(fp, "\n");
  822. if (callchain)
  823. hist_entry_callchain__fprintf(fp, self, total_samples);
  824. return ret;
  825. }
  826. /*
  827. * Track the widest dso name seen, for column alignment:
  828. */
  829. static void dso__calc_col_width(struct dso *self)
  830. {
  831. if (!col_width_list_str && !field_sep &&
  832. (!dso_list || strlist__has_entry(dso_list, self->name))) {
  833. unsigned int slen = strlen(self->name);
  834. if (slen > dsos__col_width)
  835. dsos__col_width = slen;
  836. }
  837. self->slen_calculated = 1;
  838. }
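/*
* Resolve a raw instruction pointer: find the thread's map covering it,
* translate the address with map_ip(), pick the owning DSO and look the
* symbol up there. mapp, dsop and ipp are in/out parameters, so callers get
* the resolved map, DSO and translated address back.
*/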
  839. static struct symbol *
  840. resolve_symbol(struct thread *thread, struct map **mapp,
  841. struct dso **dsop, u64 *ipp)
  842. {
  843. struct dso *dso = dsop ? *dsop : NULL;
  844. struct map *map = mapp ? *mapp : NULL;
  845. u64 ip = *ipp;
  846. if (!thread)
  847. return NULL;
  848. if (dso)
  849. goto got_dso;
  850. if (map)
  851. goto got_map;
  852. map = thread__find_map(thread, ip);
  853. if (map != NULL) {
  854. /*
  855. * We have to do this here as we may have a dso
  856. * with no symbol hit that has a name longer than
  857. * the ones with symbols sampled.
  858. */
  859. if (!sort_dso.elide && !map->dso->slen_calculated)
  860. dso__calc_col_width(map->dso);
  861. if (mapp)
  862. *mapp = map;
  863. got_map:
  864. ip = map->map_ip(map, ip);
  865. dso = map->dso;
  866. } else {
  867. /*
  868. * If this is outside of all known maps,
  869. * and is a negative address, try to look it
  870. * up in the kernel dso, as it might be a
  871. * vsyscall (which executes in user-mode):
  872. */
  873. if ((long long)ip < 0)
  874. dso = kernel_dso;
  875. }
  876. dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
  877. dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
  878. *ipp = ip;
  879. if (dsop)
  880. *dsop = dso;
  881. if (!dso)
  882. return NULL;
  883. got_dso:
  884. return dso->find_symbol(dso, ip);
  885. }
  886. static int call__match(struct symbol *sym)
  887. {
  888. if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
  889. return 1;
  890. return 0;
  891. }
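/*
* Walk a sampled callchain: PERF_CONTEXT_* markers in the ip array switch
* the DSO used for the entries that follow, each real address is resolved
* to a symbol, and the first symbol matching the parent regex becomes the
* entry's parent. Returns the per-entry symbol array when callchains are
* being recorded, for later use by append_chain().
*/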
  892. static struct symbol **
  893. resolve_callchain(struct thread *thread, struct map *map __used,
  894. struct ip_callchain *chain, struct hist_entry *entry)
  895. {
  896. u64 context = PERF_CONTEXT_MAX;
  897. struct symbol **syms = NULL;
  898. unsigned int i;
  899. if (callchain) {
  900. syms = calloc(chain->nr, sizeof(*syms));
  901. if (!syms) {
  902. fprintf(stderr, "Can't allocate memory for symbols\n");
  903. exit(-1);
  904. }
  905. }
  906. for (i = 0; i < chain->nr; i++) {
  907. u64 ip = chain->ips[i];
  908. struct dso *dso = NULL;
  909. struct symbol *sym;
  910. if (ip >= PERF_CONTEXT_MAX) {
  911. context = ip;
  912. continue;
  913. }
  914. switch (context) {
  915. case PERF_CONTEXT_HV:
  916. dso = hypervisor_dso;
  917. break;
  918. case PERF_CONTEXT_KERNEL:
  919. dso = kernel_dso;
  920. break;
  921. default:
  922. break;
  923. }
  924. sym = resolve_symbol(thread, NULL, &dso, &ip);
  925. if (sym) {
  926. if (sort__has_parent && call__match(sym) &&
  927. !entry->parent)
  928. entry->parent = sym;
  929. if (!callchain)
  930. break;
  931. syms[i] = sym;
  932. }
  933. }
  934. return syms;
  935. }
  936. /*
  937. * collect histogram counts
  938. */
  939. static int
  940. hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
  941. struct symbol *sym, u64 ip, struct ip_callchain *chain,
  942. char level, u64 count)
  943. {
  944. struct rb_node **p = &hist.rb_node;
  945. struct rb_node *parent = NULL;
  946. struct hist_entry *he;
  947. struct symbol **syms = NULL;
  948. struct hist_entry entry = {
  949. .thread = thread,
  950. .map = map,
  951. .dso = dso,
  952. .sym = sym,
  953. .ip = ip,
  954. .level = level,
  955. .count = count,
  956. .parent = NULL,
  957. .sorted_chain = RB_ROOT
  958. };
  959. int cmp;
  960. if ((sort__has_parent || callchain) && chain)
  961. syms = resolve_callchain(thread, map, chain, &entry);
  962. while (*p != NULL) {
  963. parent = *p;
  964. he = rb_entry(parent, struct hist_entry, rb_node);
  965. cmp = hist_entry__cmp(&entry, he);
  966. if (!cmp) {
  967. he->count += count;
  968. if (callchain) {
  969. append_chain(&he->callchain, chain, syms);
  970. free(syms);
  971. }
  972. return 0;
  973. }
  974. if (cmp < 0)
  975. p = &(*p)->rb_left;
  976. else
  977. p = &(*p)->rb_right;
  978. }
  979. he = malloc(sizeof(*he));
  980. if (!he)
  981. return -ENOMEM;
  982. *he = entry;
  983. if (callchain) {
  984. callchain_init(&he->callchain);
  985. append_chain(&he->callchain, chain, syms);
  986. free(syms);
  987. }
  988. rb_link_node(&he->rb_node, parent, p);
  989. rb_insert_color(&he->rb_node, &hist);
  990. return 0;
  991. }
  992. static void hist_entry__free(struct hist_entry *he)
  993. {
  994. free(he);
  995. }
  996. /*
  997. * collapse the histogram
  998. */
  999. static struct rb_root collapse_hists;
  1000. static void collapse__insert_entry(struct hist_entry *he)
  1001. {
  1002. struct rb_node **p = &collapse_hists.rb_node;
  1003. struct rb_node *parent = NULL;
  1004. struct hist_entry *iter;
  1005. int64_t cmp;
  1006. while (*p != NULL) {
  1007. parent = *p;
  1008. iter = rb_entry(parent, struct hist_entry, rb_node);
  1009. cmp = hist_entry__collapse(iter, he);
  1010. if (!cmp) {
  1011. iter->count += he->count;
  1012. hist_entry__free(he);
  1013. return;
  1014. }
  1015. if (cmp < 0)
  1016. p = &(*p)->rb_left;
  1017. else
  1018. p = &(*p)->rb_right;
  1019. }
  1020. rb_link_node(&he->rb_node, parent, p);
  1021. rb_insert_color(&he->rb_node, &collapse_hists);
  1022. }
  1023. static void collapse__resort(void)
  1024. {
  1025. struct rb_node *next;
  1026. struct hist_entry *n;
  1027. if (!sort__need_collapse)
  1028. return;
  1029. next = rb_first(&hist);
  1030. while (next) {
  1031. n = rb_entry(next, struct hist_entry, rb_node);
  1032. next = rb_next(&n->rb_node);
  1033. rb_erase(&n->rb_node, &hist);
  1034. collapse__insert_entry(n);
  1035. }
  1036. }
  1037. /*
  1038. * reverse the map, sort on count.
  1039. */
  1040. static struct rb_root output_hists;
  1041. static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
  1042. {
  1043. struct rb_node **p = &output_hists.rb_node;
  1044. struct rb_node *parent = NULL;
  1045. struct hist_entry *iter;
  1046. if (callchain)
  1047. callchain_param.sort(&he->sorted_chain, &he->callchain,
  1048. min_callchain_hits, &callchain_param);
  1049. while (*p != NULL) {
  1050. parent = *p;
  1051. iter = rb_entry(parent, struct hist_entry, rb_node);
  1052. if (he->count > iter->count)
  1053. p = &(*p)->rb_left;
  1054. else
  1055. p = &(*p)->rb_right;
  1056. }
  1057. rb_link_node(&he->rb_node, parent, p);
  1058. rb_insert_color(&he->rb_node, &output_hists);
  1059. }
  1060. static void output__resort(u64 total_samples)
  1061. {
  1062. struct rb_node *next;
  1063. struct hist_entry *n;
  1064. struct rb_root *tree = &hist;
  1065. u64 min_callchain_hits;
  1066. min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
  1067. if (sort__need_collapse)
  1068. tree = &collapse_hists;
  1069. next = rb_first(tree);
  1070. while (next) {
  1071. n = rb_entry(next, struct hist_entry, rb_node);
  1072. next = rb_next(&n->rb_node);
  1073. rb_erase(&n->rb_node, tree);
  1074. output__insert_entry(n, min_callchain_hits);
  1075. }
  1076. }
  1077. static size_t output__fprintf(FILE *fp, u64 total_samples)
  1078. {
  1079. struct hist_entry *pos;
  1080. struct sort_entry *se;
  1081. struct rb_node *nd;
  1082. size_t ret = 0;
  1083. unsigned int width;
  1084. char *col_width = col_width_list_str;
  1085. fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
  1086. fprintf(fp, "#\n");
  1087. fprintf(fp, "# Overhead");
  1088. if (show_nr_samples) {
  1089. if (field_sep)
  1090. fprintf(fp, "%cSamples", *field_sep);
  1091. else
  1092. fputs(" Samples ", fp);
  1093. }
  1094. list_for_each_entry(se, &hist_entry__sort_list, list) {
  1095. if (se->elide)
  1096. continue;
  1097. if (field_sep) {
  1098. fprintf(fp, "%c%s", *field_sep, se->header);
  1099. continue;
  1100. }
  1101. width = strlen(se->header);
  1102. if (se->width) {
  1103. if (col_width_list_str) {
  1104. if (col_width) {
  1105. *se->width = atoi(col_width);
  1106. col_width = strchr(col_width, ',');
  1107. if (col_width)
  1108. ++col_width;
  1109. }
  1110. }
  1111. width = *se->width = max(*se->width, width);
  1112. }
  1113. fprintf(fp, " %*s", width, se->header);
  1114. }
  1115. fprintf(fp, "\n");
  1116. if (field_sep)
  1117. goto print_entries;
  1118. fprintf(fp, "# ........");
  1119. if (show_nr_samples)
  1120. fprintf(fp, " ..........");
  1121. list_for_each_entry(se, &hist_entry__sort_list, list) {
  1122. unsigned int i;
  1123. if (se->elide)
  1124. continue;
  1125. fprintf(fp, " ");
  1126. if (se->width)
  1127. width = *se->width;
  1128. else
  1129. width = strlen(se->header);
  1130. for (i = 0; i < width; i++)
  1131. fprintf(fp, ".");
  1132. }
  1133. fprintf(fp, "\n");
  1134. fprintf(fp, "#\n");
  1135. print_entries:
  1136. for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
  1137. pos = rb_entry(nd, struct hist_entry, rb_node);
  1138. ret += hist_entry__fprintf(fp, pos, total_samples);
  1139. }
  1140. if (sort_order == default_sort_order &&
  1141. parent_pattern == default_parent_pattern) {
  1142. fprintf(fp, "#\n");
  1143. fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
  1144. fprintf(fp, "#\n");
  1145. }
  1146. fprintf(fp, "\n");
  1147. return ret;
  1148. }
  1149. static void register_idle_thread(void)
  1150. {
  1151. struct thread *thread = threads__findnew(0);
  1152. if (thread == NULL ||
  1153. thread__set_comm(thread, "[idle]")) {
  1154. fprintf(stderr, "problem inserting idle task.\n");
  1155. exit(-1);
  1156. }
  1157. }
  1158. static unsigned long total = 0,
  1159. total_mmap = 0,
  1160. total_comm = 0,
  1161. total_fork = 0,
  1162. total_unknown = 0,
  1163. total_lost = 0;
  1164. static int validate_chain(struct ip_callchain *chain, event_t *event)
  1165. {
  1166. unsigned int chain_size;
  1167. chain_size = event->header.size;
  1168. chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
  1169. if (chain->nr*sizeof(u64) > chain_size)
  1170. return -1;
  1171. return 0;
  1172. }
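/*
* Handle one PERF_EVENT_SAMPLE: read the optional period and callchain from
* the variable-sized tail of the event, classify the sample as kernel, user
* or hypervisor from the misc bits, resolve the symbol, apply the --dsos,
* --comms and --symbols filters, and add the result to the histogram
* weighted by its period.
*/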
  1173. static int
  1174. process_sample_event(event_t *event, unsigned long offset, unsigned long head)
  1175. {
  1176. char level;
  1177. int show = 0;
  1178. struct dso *dso = NULL;
  1179. struct thread *thread = threads__findnew(event->ip.pid);
  1180. u64 ip = event->ip.ip;
  1181. u64 period = 1;
  1182. struct map *map = NULL;
  1183. void *more_data = event->ip.__more_data;
  1184. struct ip_callchain *chain = NULL;
  1185. int cpumode;
  1186. if (sample_type & PERF_SAMPLE_PERIOD) {
  1187. period = *(u64 *)more_data;
  1188. more_data += sizeof(u64);
  1189. }
  1190. dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
  1191. (void *)(offset + head),
  1192. (void *)(long)(event->header.size),
  1193. event->header.misc,
  1194. event->ip.pid,
  1195. (void *)(long)ip,
  1196. (long long)period);
  1197. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  1198. unsigned int i;
  1199. chain = (void *)more_data;
  1200. dprintf("... chain: nr:%Lu\n", chain->nr);
  1201. if (validate_chain(chain, event) < 0) {
  1202. eprintf("call-chain problem with event, skipping it.\n");
  1203. return 0;
  1204. }
  1205. if (dump_trace) {
  1206. for (i = 0; i < chain->nr; i++)
  1207. dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
  1208. }
  1209. }
  1210. if (thread == NULL) {
  1211. eprintf("problem processing %d event, skipping it.\n",
  1212. event->header.type);
  1213. return -1;
  1214. }
  1215. dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
  1216. if (comm_list && !strlist__has_entry(comm_list, thread->comm))
  1217. return 0;
  1218. cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
  1219. if (cpumode == PERF_EVENT_MISC_KERNEL) {
  1220. show = SHOW_KERNEL;
  1221. level = 'k';
  1222. dso = kernel_dso;
  1223. dprintf(" ...... dso: %s\n", dso->name);
  1224. } else if (cpumode == PERF_EVENT_MISC_USER) {
  1225. show = SHOW_USER;
  1226. level = '.';
  1227. } else {
  1228. show = SHOW_HV;
  1229. level = 'H';
  1230. dso = hypervisor_dso;
  1231. dprintf(" ...... dso: [hypervisor]\n");
  1232. }
  1233. if (show & show_mask) {
  1234. struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
  1235. if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
  1236. return 0;
  1237. if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
  1238. return 0;
  1239. if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
  1240. eprintf("problem incrementing symbol count, skipping event\n");
  1241. return -1;
  1242. }
  1243. }
  1244. total += period;
  1245. return 0;
  1246. }
  1247. static int
  1248. process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
  1249. {
  1250. struct thread *thread = threads__findnew(event->mmap.pid);
  1251. struct map *map = map__new(&event->mmap);
  1252. dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
  1253. (void *)(offset + head),
  1254. (void *)(long)(event->header.size),
  1255. event->mmap.pid,
  1256. (void *)(long)event->mmap.start,
  1257. (void *)(long)event->mmap.len,
  1258. (void *)(long)event->mmap.pgoff,
  1259. event->mmap.filename);
  1260. if (thread == NULL || map == NULL) {
  1261. dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
  1262. return 0;
  1263. }
  1264. thread__insert_map(thread, map);
  1265. total_mmap++;
  1266. return 0;
  1267. }
  1268. static int
  1269. process_comm_event(event_t *event, unsigned long offset, unsigned long head)
  1270. {
  1271. struct thread *thread = threads__findnew(event->comm.pid);
  1272. dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
  1273. (void *)(offset + head),
  1274. (void *)(long)(event->header.size),
  1275. event->comm.comm, event->comm.pid);
  1276. if (thread == NULL ||
  1277. thread__set_comm(thread, event->comm.comm)) {
  1278. dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
  1279. return -1;
  1280. }
  1281. total_comm++;
  1282. return 0;
  1283. }
  1284. static int
  1285. process_task_event(event_t *event, unsigned long offset, unsigned long head)
  1286. {
  1287. struct thread *thread = threads__findnew(event->fork.pid);
  1288. struct thread *parent = threads__findnew(event->fork.ppid);
  1289. dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
  1290. (void *)(offset + head),
  1291. (void *)(long)(event->header.size),
  1292. event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
  1293. event->fork.pid, event->fork.tid,
  1294. event->fork.ppid, event->fork.ptid);
  1295. /*
  1296. * A thread clone will have the same PID for both
  1297. * parent and child.
  1298. */
  1299. if (thread == parent)
  1300. return 0;
  1301. if (event->header.type == PERF_EVENT_EXIT)
  1302. return 0;
  1303. if (!thread || !parent || thread__fork(thread, parent)) {
  1304. dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
  1305. return -1;
  1306. }
  1307. total_fork++;
  1308. return 0;
  1309. }
  1310. static int
  1311. process_lost_event(event_t *event, unsigned long offset, unsigned long head)
  1312. {
  1313. dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
  1314. (void *)(offset + head),
  1315. (void *)(long)(event->header.size),
  1316. event->lost.id,
  1317. event->lost.lost);
  1318. total_lost += event->lost.lost;
  1319. return 0;
  1320. }
  1321. static void trace_event(event_t *event)
  1322. {
  1323. unsigned char *raw_event = (void *)event;
  1324. char *color = PERF_COLOR_BLUE;
  1325. int i, j;
  1326. if (!dump_trace)
  1327. return;
  1328. dprintf(".");
  1329. cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
  1330. for (i = 0; i < event->header.size; i++) {
  1331. if ((i & 15) == 0) {
  1332. dprintf(".");
  1333. cdprintf(" %04x: ", i);
  1334. }
  1335. cdprintf(" %02x", raw_event[i]);
  1336. if (((i & 15) == 15) || i == event->header.size-1) {
  1337. cdprintf(" ");
  1338. for (j = 0; j < 15-(i & 15); j++)
  1339. cdprintf(" ");
  1340. for (j = 0; j < (i & 15); j++) {
  1341. if (isprint(raw_event[i-15+j]))
  1342. cdprintf("%c", raw_event[i-15+j]);
  1343. else
  1344. cdprintf(".");
  1345. }
  1346. cdprintf("\n");
  1347. }
  1348. }
  1349. dprintf(".\n");
  1350. }
  1351. static int
  1352. process_read_event(event_t *event, unsigned long offset, unsigned long head)
  1353. {
  1354. dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
  1355. (void *)(offset + head),
  1356. (void *)(long)(event->header.size),
  1357. event->read.pid,
  1358. event->read.tid,
  1359. event->read.value);
  1360. return 0;
  1361. }
  1362. static int
  1363. process_event(event_t *event, unsigned long offset, unsigned long head)
  1364. {
  1365. trace_event(event);
  1366. switch (event->header.type) {
  1367. case PERF_EVENT_SAMPLE:
  1368. return process_sample_event(event, offset, head);
  1369. case PERF_EVENT_MMAP:
  1370. return process_mmap_event(event, offset, head);
  1371. case PERF_EVENT_COMM:
  1372. return process_comm_event(event, offset, head);
  1373. case PERF_EVENT_FORK:
  1374. case PERF_EVENT_EXIT:
  1375. return process_task_event(event, offset, head);
  1376. case PERF_EVENT_LOST:
  1377. return process_lost_event(event, offset, head);
  1378. case PERF_EVENT_READ:
  1379. return process_read_event(event, offset, head);
  1380. /*
  1381. * We don't process them right now but they are fine:
  1382. */
  1383. case PERF_EVENT_THROTTLE:
  1384. case PERF_EVENT_UNTHROTTLE:
  1385. return 0;
  1386. default:
  1387. return -1;
  1388. }
  1389. return 0;
  1390. }
  1391. static struct perf_header *header;
  1392. static u64 perf_header__sample_type(void)
  1393. {
  1394. u64 sample_type = 0;
  1395. int i;
  1396. for (i = 0; i < header->attrs; i++) {
  1397. struct perf_header_attr *attr = header->attr[i];
  1398. if (!sample_type)
  1399. sample_type = attr->attr.sample_type;
  1400. else if (sample_type != attr->attr.sample_type)
  1401. die("non matching sample_type");
  1402. }
  1403. return sample_type;
  1404. }
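/*
* Main report loop: mmap the perf.data file one window at a time, walk the
* event stream starting at header->data_offset, hand each event to
* process_event(), then collapse, resort and print the histogram.
*/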
  1405. static int __cmd_report(void)
  1406. {
  1407. int ret, rc = EXIT_FAILURE;
  1408. unsigned long offset = 0;
  1409. unsigned long head, shift;
  1410. struct stat stat;
  1411. event_t *event;
  1412. uint32_t size;
  1413. char *buf;
  1414. register_idle_thread();
  1415. input = open(input_name, O_RDONLY);
  1416. if (input < 0) {
  1417. fprintf(stderr, " failed to open file: %s", input_name);
  1418. if (!strcmp(input_name, "perf.data"))
  1419. fprintf(stderr, " (try 'perf record' first)");
  1420. fprintf(stderr, "\n");
  1421. exit(-1);
  1422. }
  1423. ret = fstat(input, &stat);
  1424. if (ret < 0) {
  1425. perror("failed to stat file");
  1426. exit(-1);
  1427. }
  1428. if (!stat.st_size) {
  1429. fprintf(stderr, "zero-sized file, nothing to do!\n");
  1430. exit(0);
  1431. }
  1432. header = perf_header__read(input);
  1433. head = header->data_offset;
  1434. sample_type = perf_header__sample_type();
  1435. if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
  1436. if (sort__has_parent) {
  1437. fprintf(stderr, "selected --sort parent, but no"
  1438. " callchain data. Did you call"
  1439. " perf record without -g?\n");
  1440. exit(-1);
  1441. }
  1442. if (callchain) {
  1443. fprintf(stderr, "selected -c but no callchain data."
  1444. " Did you call perf record without"
  1445. " -g?\n");
  1446. exit(-1);
  1447. }
  1448. }
  1449. if (load_kernel() < 0) {
  1450. perror("failed to load kernel symbols");
  1451. return EXIT_FAILURE;
  1452. }
  1453. if (!full_paths) {
  1454. if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
  1455. perror("failed to get the current directory");
  1456. return EXIT_FAILURE;
  1457. }
  1458. cwdlen = strlen(cwd);
  1459. } else {
  1460. cwd = NULL;
  1461. cwdlen = 0;
  1462. }
  1463. shift = page_size * (head / page_size);
  1464. offset += shift;
  1465. head -= shift;
  1466. remap:
  1467. buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
  1468. MAP_SHARED, input, offset);
  1469. if (buf == MAP_FAILED) {
  1470. perror("failed to mmap file");
  1471. exit(-1);
  1472. }
  1473. more:
  1474. event = (event_t *)(buf + head);
  1475. size = event->header.size;
  1476. if (!size)
  1477. size = 8;
  1478. if (head + event->header.size >= page_size * mmap_window) {
  1479. int ret;
  1480. shift = page_size * (head / page_size);
  1481. ret = munmap(buf, page_size * mmap_window);
  1482. assert(ret == 0);
  1483. offset += shift;
  1484. head -= shift;
  1485. goto remap;
  1486. }
  1487. size = event->header.size;
  1488. dprintf("\n%p [%p]: event: %d\n",
  1489. (void *)(offset + head),
  1490. (void *)(long)event->header.size,
  1491. event->header.type);
  1492. if (!size || process_event(event, offset, head) < 0) {
  1493. dprintf("%p [%p]: skipping unknown header type: %d\n",
  1494. (void *)(offset + head),
  1495. (void *)(long)(event->header.size),
  1496. event->header.type);
  1497. total_unknown++;
  1498. /*
  1499. * assume we lost track of the stream, check alignment, and
  1500. * increment a single u64 in the hope to catch on again 'soon'.
  1501. */
  1502. if (unlikely(head & 7))
  1503. head &= ~7ULL;
  1504. size = 8;
  1505. }
  1506. head += size;
  1507. if (offset + head >= header->data_offset + header->data_size)
  1508. goto done;
  1509. if (offset + head < (unsigned long)stat.st_size)
  1510. goto more;
  1511. done:
  1512. rc = EXIT_SUCCESS;
  1513. close(input);
  1514. dprintf(" IP events: %10ld\n", total);
  1515. dprintf(" mmap events: %10ld\n", total_mmap);
  1516. dprintf(" comm events: %10ld\n", total_comm);
  1517. dprintf(" fork events: %10ld\n", total_fork);
  1518. dprintf(" lost events: %10ld\n", total_lost);
  1519. dprintf(" unknown events: %10ld\n", total_unknown);
  1520. if (dump_trace)
  1521. return 0;
  1522. if (verbose >= 3)
  1523. threads__fprintf(stdout);
  1524. if (verbose >= 2)
  1525. dsos__fprintf(stdout);
  1526. collapse__resort();
  1527. output__resort(total);
  1528. output__fprintf(stdout, total);
  1529. return rc;
  1530. }
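/*
* Parse the -g/--call-graph argument, "output_type,min_percent": output_type
* selects flat, graph (absolute) or fractal (relative) callchain output and
* min_percent is the threshold below which chains are not printed.
*/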
  1531. static int
  1532. parse_callchain_opt(const struct option *opt __used, const char *arg,
  1533. int unset __used)
  1534. {
  1535. char *tok;
  1536. char *endptr;
  1537. callchain = 1;
  1538. if (!arg)
  1539. return 0;
  1540. tok = strtok((char *)arg, ",");
  1541. if (!tok)
  1542. return -1;
  1543. /* get the output mode */
  1544. if (!strncmp(tok, "graph", strlen(arg)))
  1545. callchain_param.mode = CHAIN_GRAPH_ABS;
  1546. else if (!strncmp(tok, "flat", strlen(arg)))
  1547. callchain_param.mode = CHAIN_FLAT;
  1548. else if (!strncmp(tok, "fractal", strlen(arg)))
  1549. callchain_param.mode = CHAIN_GRAPH_REL;
  1550. else
  1551. return -1;
  1552. /* get the min percentage */
  1553. tok = strtok(NULL, ",");
  1554. if (!tok)
  1555. goto setup;
  1556. callchain_param.min_percent = strtod(tok, &endptr);
  1557. if (tok == endptr)
  1558. return -1;
  1559. setup:
  1560. if (register_callchain_param(&callchain_param) < 0) {
  1561. fprintf(stderr, "Can't register callchain params\n");
  1562. return -1;
  1563. }
  1564. return 0;
  1565. }
  1566. static const char * const report_usage[] = {
  1567. "perf report [<options>] <command>",
  1568. NULL
  1569. };
  1570. static const struct option options[] = {
  1571. OPT_STRING('i', "input", &input_name, "file",
  1572. "input file name"),
  1573. OPT_BOOLEAN('v', "verbose", &verbose,
  1574. "be more verbose (show symbol address, etc)"),
  1575. OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
  1576. "dump raw trace in ASCII"),
  1577. OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
  1578. OPT_BOOLEAN('m', "modules", &modules,
  1579. "load module symbols - WARNING: use only with -k and LIVE kernel"),
  1580. OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
  1581. "Show a column with the number of samples"),
  1582. OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
  1583. "sort by key(s): pid, comm, dso, symbol, parent"),
  1584. OPT_BOOLEAN('P', "full-paths", &full_paths,
  1585. "Don't shorten the pathnames taking into account the cwd"),
  1586. OPT_STRING('p', "parent", &parent_pattern, "regex",
  1587. "regex filter to identify parent, see: '--sort parent'"),
  1588. OPT_BOOLEAN('x', "exclude-other", &exclude_other,
  1589. "Only display entries with parent-match"),
  1590. OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
  1591. "Display callchains using output_type and min percent threshold. "
  1592. "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
  1593. OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
  1594. "only consider symbols in these dsos"),
  1595. OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
  1596. "only consider symbols in these comms"),
  1597. OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
  1598. "only consider these symbols"),
  1599. OPT_STRING('w', "column-widths", &col_width_list_str,
  1600. "width[,width...]",
  1601. "don't try to adjust column width, use these fixed values"),
  1602. OPT_STRING('t', "field-separator", &field_sep, "separator",
  1603. "separator for columns, no spaces will be added between "
  1604. "columns '.' is reserved."),
  1605. OPT_END()
  1606. };
  1607. static void setup_sorting(void)
  1608. {
  1609. char *tmp, *tok, *str = strdup(sort_order);
  1610. for (tok = strtok_r(str, ", ", &tmp);
  1611. tok; tok = strtok_r(NULL, ", ", &tmp)) {
  1612. if (sort_dimension__add(tok) < 0) {
  1613. error("Unknown --sort key: `%s'", tok);
  1614. usage_with_options(report_usage, options);
  1615. }
  1616. }
  1617. free(str);
  1618. }
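/*
* Build a strlist from a comma-separated -d/-C/-S argument; when the list
* has a single entry that column carries no information, so it is printed
* once as a header comment and elided from every row.
*/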
  1619. static void setup_list(struct strlist **list, const char *list_str,
  1620. struct sort_entry *se, const char *list_name,
  1621. FILE *fp)
  1622. {
  1623. if (list_str) {
  1624. *list = strlist__new(true, list_str);
  1625. if (!*list) {
  1626. fprintf(stderr, "problems parsing %s list\n",
  1627. list_name);
  1628. exit(129);
  1629. }
  1630. if (strlist__nr_entries(*list) == 1) {
  1631. fprintf(fp, "# %s: %s\n", list_name,
  1632. strlist__entry(*list, 0)->s);
  1633. se->elide = true;
  1634. }
  1635. }
  1636. }
  1637. int cmd_report(int argc, const char **argv, const char *prefix __used)
  1638. {
  1639. symbol__init();
  1640. page_size = getpagesize();
  1641. argc = parse_options(argc, argv, options, report_usage, 0);
  1642. setup_sorting();
  1643. if (parent_pattern != default_parent_pattern) {
  1644. sort_dimension__add("parent");
  1645. sort_parent.elide = 1;
  1646. } else
  1647. exclude_other = 0;
  1648. /*
  1649. * Any (unrecognized) arguments left?
  1650. */
  1651. if (argc)
  1652. usage_with_options(report_usage, options);
  1653. setup_pager();
  1654. setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
  1655. setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
  1656. setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
  1657. if (field_sep && *field_sep == '.') {
  1658. fputs("'.' is the only invalid --field-separator argument\n",
  1659. stderr);
  1660. exit(129);
  1661. }
  1662. return __cmd_report();
  1663. }