/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char const *input_name = "perf.data";
static char *vmlinux = NULL;

static char default_sort_order[] = "comm,dso";
static char *sort_order = default_sort_order;
static char *dso_list_str, *comm_list_str, *sym_list_str,
	    *col_width_list_str;
static struct strlist *dso_list, *comm_list, *sym_list;
static char *field_sep;

static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)
#define cdprintf(x...)	do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)

static int verbose;
#define eprintf(x...)	do { if (verbose) fprintf(stderr, x); } while (0)

static int modules;

static int full_paths;
static int show_nr_samples;

static unsigned long page_size;
static unsigned long mmap_window = 32;

static char default_parent_pattern[] = "^sys_|^do_page_fault";
static char *parent_pattern = default_parent_pattern;
static regex_t parent_regex;

static int exclude_other = 1;

static char callchain_default_opt[] = "fractal,0.5";

static int callchain;

static struct callchain_param callchain_param = {
	.mode = CHAIN_GRAPH_ABS,
	.min_percent = 0.5
};

static u64 sample_type;

struct ip_event {
	struct perf_event_header header;
	u64 ip;
	u32 pid, tid;
	unsigned char __more_data[];
};

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
};

struct period_event {
	struct perf_event_header header;
	u64 time;
	u64 id;
	u64 sample_period;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;
};

struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 format[3];
};

typedef union event_union {
	struct perf_event_header header;
	struct ip_event ip;
	struct mmap_event mmap;
	struct comm_event comm;
	struct fork_event fork;
	struct period_event period;
	struct lost_event lost;
	struct read_event read;
} event_t;
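
/*
 * fprintf() wrapper used by the column printers below: when a field
 * separator is set (-t/--field-separator), any occurrence of that
 * separator character in the formatted text is rewritten to '.' so
 * the output still splits cleanly on the separator.
 */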
static int repsep_fprintf(FILE *fp, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	if (!field_sep)
		n = vfprintf(fp, fmt, ap);
	else {
		char *bf = NULL;
		n = vasprintf(&bf, fmt, ap);
		if (n > 0) {
			char *sep = bf;
			while (1) {
				sep = strchr(sep, *field_sep);
				if (sep == NULL)
					break;
				*sep = '.';
			}
		}
		fputs(bf, fp);
		free(bf);
	}
	va_end(ap);
	return n;
}

static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
static struct dso *hypervisor_dso;

static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}

static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL, verbose);
	if (nr < 0) {
		eprintf("Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr)
		eprintf("No symbols found in: %s, maybe install a debug package?\n", name);

	dsos__add(dso);
	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}

static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}

static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
	return dso__find_symbol(dso, ip);
}
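
/*
 * Set up the special DSOs: the kernel image (from vmlinux or, failing
 * that, kallsyms via dso__load_kernel()), a synthetic "[vdso]" entry
 * whose addresses are looked up as-is, and a "[hypervisor]" entry for
 * samples taken in hypervisor context.
 */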
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
	if (err <= 0) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	vdso->find_symbol = vdso__find_symbol;
	dsos__add(vdso);

	hypervisor_dso = dso__new("[hypervisor]", 0);
	if (!hypervisor_dso)
		return -1;
	dsos__add(hypervisor_dso);

	return err;
}

static char __cwd[PATH_MAX];
static char *cwd = __cwd;
static int cwdlen;

static int strcommon(const char *pathname)
{
	int n = 0;

	while (pathname[n] == cwd[n] && n < cwdlen)
		++n;

	return n;
}

struct map {
	struct list_head node;
	u64 start;
	u64 end;
	u64 pgoff;
	u64 (*map_ip)(struct map *, u64);
	struct dso *dso;
};

static u64 map__map_ip(struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;
}

static u64 vdso__map_ip(struct map *map __used, u64 ip)
{
	return ip;
}

static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}
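
/*
 * Build a map from a PERF_EVENT_MMAP record: shorten the filename
 * relative to the cwd unless --full-paths was given, redirect
 * anonymous mappings to the /tmp/perf-<pid>.map convention (used e.g.
 * by JITs to export symbols), and pick the ip translation function:
 * identity for the vdso and anonymous maps, start/pgoff adjustment
 * otherwise.
 */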
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];
		int anon;

		if (cwd) {
			int n = strcommon(filename);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
			filename = newfilename;
		}

		self->start = event->start;
		self->end = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		if (self->dso == vdso || anon)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;

out_delete:
	free(self);
	return NULL;
}

static struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

static int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}

struct thread {
	struct rb_node rb_node;
	struct list_head maps;
	pid_t pid;
	char *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

static unsigned int dsos__col_width,
		    comms__col_width,
		    threads__col_width;

static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	if (!self->comm)
		return -ENOMEM;

	if (!col_width_list_str && !field_sep &&
	    (!comm_list || strlist__has_entry(comm_list, comm))) {
		unsigned int slen = strlen(comm);
		if (slen > comms__col_width) {
			comms__col_width = slen;
			threads__col_width = slen + 6;
		}
	}

	return 0;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}

static struct rb_root threads;
static struct thread *last_match;

static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}
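
/*
 * Insert a map into a thread's map list. Any existing map that
 * overlaps the new one is trimmed back to the non-overlapping part,
 * and dropped entirely if nothing of it remains.
 */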
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			if (verbose >= 2) {
				printf("overlapping maps:\n");
				map__fprintf(map, stdout);
				map__fprintf(pos, stdout);
			}

			if (map->start <= pos->start && map->end > pos->start)
				pos->start = map->end;

			if (map->end >= pos->end && map->start < pos->end)
				pos->end = map->start;

			if (verbose >= 2) {
				printf("after collision:\n");
				map__fprintf(pos, stdout);
			}

			if (pos->start >= pos->end) {
				list_del_init(&pos->node);
				free(pos);
			}
		}
	}

	list_add_tail(&map->node, &self->maps);
}

static int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}

static struct map *thread__find_map(struct thread *self, u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

static size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node rb_node;
	struct thread *thread;
	struct map *map;
	struct dso *dso;
	struct symbol *sym;
	struct symbol *parent;
	u64 ip;
	char level;
	struct callchain_node callchain;
	struct rb_root sorted_chain;
	u64 count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;
	char *header;
	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
	unsigned int *width;
	bool elide;
};

static int64_t cmp_null(void *l, void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	return repsep_fprintf(fp, "%*s:%5d", width - 6,
			      self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "Command: Pid",
	.cmp = sort__thread_cmp,
	.print = sort__thread_print,
	.width = &threads__col_width,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r)
		return cmp_null(comm_l, comm_r);

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	return repsep_fprintf(fp, "%*s", width, self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header = "Command",
	.cmp = sort__comm_cmp,
	.collapse = sort__comm_collapse,
	.print = sort__comm_print,
	.width = &comms__col_width,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r)
		return cmp_null(dso_l, dso_r);

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	if (self->dso)
		return repsep_fprintf(fp, "%-*s", width, self->dso->name);

	return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object",
	.cmp = sort__dso_cmp,
	.print = sort__dso_print,
	.width = &dsos__col_width,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
{
	size_t ret = 0;

	if (verbose)
		ret += repsep_fprintf(fp, "%#018llx ", (u64)self->ip);

	ret += repsep_fprintf(fp, "[%c] ", self->level);
	if (self->sym) {
		ret += repsep_fprintf(fp, "%s", self->sym->name);

		if (self->sym->module)
			ret += repsep_fprintf(fp, "\t[%s]",
					      self->sym->module->name);
	} else {
		ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp = sort__sym_cmp,
	.print = sort__sym_print,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_l->name, sym_r->name);
}

static size_t
sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	return repsep_fprintf(fp, "%-*s", width,
			      self->parent ? self->parent->name : "[other]");
}

static unsigned int parent_symbol__col_width;

static struct sort_entry sort_parent = {
	.header = "Parent symbol",
	.cmp = sort__parent_cmp,
	.print = sort__parent_print,
	.width = &parent_symbol__col_width,
};

static int sort__need_collapse = 0;
static int sort__has_parent = 0;

struct sort_dimension {
	char *name;
	struct sort_entry *entry;
	int taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid", .entry = &sort_thread, },
	{ .name = "comm", .entry = &sort_comm, },
	{ .name = "dso", .entry = &sort_dso, },
	{ .name = "symbol", .entry = &sort_sym, },
	{ .name = "parent", .entry = &sort_parent, },
};

static LIST_HEAD(hist_entry__sort_list);
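
/*
 * Match a --sort token against the table above (case-insensitive
 * prefix match), flag the need for a collapse pass when the dimension
 * defines one, and compile parent_regex when "parent" is selected.
 */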
static int sort_dimension__add(char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				fprintf(stderr, "Invalid regex: %s\n%s",
					parent_pattern, err);
				exit(-1);
			}
			sort__has_parent = 1;
		}

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}

static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
{
	int i;
	size_t ret = 0;

	ret += fprintf(fp, "%s", " ");
	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t
ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
		       int depth_mask, int count, u64 total_samples,
		       int hits)
{
	int i;
	size_t ret = 0;

	ret += fprintf(fp, "%s", " ");
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}

	if (chain->sym)
		ret += fprintf(fp, "%s\n", chain->sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static size_t
callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
			 u64 total_samples, int depth, int depth_mask)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	size_t ret = 0;
	int i;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->cumul_hit;
	else
		new_total = total_samples;

	node = rb_first(&self->rb_root);
	while (node) {
		child = rb_entry(node, struct callchain_node, rb_node);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 */
		next = rb_next(node);
		if (!next)
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);

		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			if (chain->ip >= PERF_CONTEXT_MAX)
				continue;
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      child->cumul_hit);
		}
		ret += callchain__fprintf_graph(fp, child, new_total,
						depth + 1,
						new_depth_mask | (1 << depth));
		node = next;
	}

	return ret;
}

static size_t
callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
			u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->sym)
			ret += fprintf(fp, " %s\n", chain->sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t
hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
			      u64 total_samples)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain,
							total_samples, 1, 1);
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		rb_node = rb_next(rb_node);
	}

	return ret;
}
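
/*
 * Print one histogram line: the overhead percentage (or the raw count
 * when there is no total), the optional sample-count column, then
 * each non-elided sort column, followed by the entry's callchain when
 * -c is in effect.
 */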
static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (exclude_other && !self->parent)
		return 0;

	if (total_samples)
		ret = percent_color_fprintf(fp,
					    field_sep ? "%.2f" : " %6.2f%%",
					    (self->count * 100.0) / total_samples);
	else
		ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);

	if (show_nr_samples) {
		if (field_sep)
			fprintf(fp, "%c%lld", *field_sep, self->count);
		else
			fprintf(fp, "%11lld", self->count);
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		fprintf(fp, "%s", field_sep ?: " ");
		ret += se->print(fp, self, se->width ? *se->width : 0);
	}

	ret += fprintf(fp, "\n");

	if (callchain)
		hist_entry_callchain__fprintf(fp, self, total_samples);

	return ret;
}

static void dso__calc_col_width(struct dso *self)
{
	if (!col_width_list_str && !field_sep &&
	    (!dso_list || strlist__has_entry(dso_list, self->name))) {
		unsigned int slen = strlen(self->name);
		if (slen > dsos__col_width)
			dsos__col_width = slen;
	}

	self->slen_calculated = 1;
}
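
/*
 * Translate a raw sample ip to a symbol: find the thread's map
 * covering the ip, rewrite the ip into the DSO's address space via
 * map->map_ip(), and look the symbol up in that DSO. Negative
 * addresses that fall outside all known maps are treated as kernel
 * addresses (vsyscall pages execute in user mode). *ipp, *mapp and
 * *dsop are updated with what was resolved.
 */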
static struct symbol *
resolve_symbol(struct thread *thread, struct map **mapp,
	       struct dso **dsop, u64 *ipp)
{
	struct dso *dso = dsop ? *dsop : NULL;
	struct map *map = mapp ? *mapp : NULL;
	u64 ip = *ipp;

	if (!thread)
		return NULL;

	if (dso)
		goto got_dso;

	if (map)
		goto got_map;

	map = thread__find_map(thread, ip);
	if (map != NULL) {
		/*
		 * We have to do this here as we may have a dso
		 * with no symbol hit that has a name longer than
		 * the ones with symbols sampled.
		 */
		if (!sort_dso.elide && !map->dso->slen_calculated)
			dso__calc_col_width(map->dso);

		if (mapp)
			*mapp = map;
got_map:
		ip = map->map_ip(map, ip);

		dso = map->dso;
	} else {
		/*
		 * If this is outside of all known maps,
		 * and is a negative address, try to look it
		 * up in the kernel dso, as it might be a
		 * vsyscall (which executes in user-mode):
		 */
		if ((long long)ip < 0)
			dso = kernel_dso;
	}
	dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
	dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
	*ipp = ip;

	if (dsop)
		*dsop = dso;

	if (!dso)
		return NULL;
got_dso:
	return dso->find_symbol(dso, ip);
}

static int call__match(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

static struct symbol **
resolve_callchain(struct thread *thread, struct map *map __used,
		  struct ip_callchain *chain, struct hist_entry *entry)
{
	u64 context = PERF_CONTEXT_MAX;
	struct symbol **syms = NULL;
	unsigned int i;

	if (callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct dso *dso = NULL;
		struct symbol *sym;

		if (ip >= PERF_CONTEXT_MAX) {
			context = ip;
			continue;
		}

		switch (context) {
		case PERF_CONTEXT_HV:
			dso = hypervisor_dso;
			break;
		case PERF_CONTEXT_KERNEL:
			dso = kernel_dso;
			break;
		default:
			break;
		}

		sym = resolve_symbol(thread, NULL, &dso, &ip);

		if (sym) {
			if (sort__has_parent && call__match(sym) &&
			    !entry->parent)
				entry->parent = sym;
			if (!callchain)
				break;
			syms[i] = sym;
		}
	}

	return syms;
}

/*
 * collect histogram counts
 */
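
/*
 * hist_entry__add() keys the rbtree on the active --sort fields via
 * hist_entry__cmp(): an existing entry that compares equal just gets
 * its count bumped (and its callchain extended), otherwise a new
 * entry is linked into the tree.
 */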
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, u64 ip, struct ip_callchain *chain,
		char level, u64 count)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct symbol **syms = NULL;
	struct hist_entry entry = {
		.thread = thread,
		.map = map,
		.dso = dso,
		.sym = sym,
		.ip = ip,
		.level = level,
		.count = count,
		.parent = NULL,
		.sorted_chain = RB_ROOT
	};
	int cmp;

	if ((sort__has_parent || callchain) && chain)
		syms = resolve_callchain(thread, map, chain, &entry);

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count += count;
			if (callchain) {
				append_chain(&he->callchain, chain, syms);
				free(syms);
			}
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	if (callchain) {
		callchain_init(&he->callchain);
		append_chain(&he->callchain, chain, syms);
		free(syms);
	}
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}

/*
 * reverse the map, sort on count.
 */
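
/*
 * Entries are re-inserted into output_hists ordered by descending
 * count; when callchains are enabled, each entry's chain is sorted
 * first, using min_callchain_hits (derived from the --callchain
 * min_percent) as the cutoff.
 */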
static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (callchain)
		callchain_param.sort(&he->sorted_chain, &he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(u64 total_samples)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;
	u64 min_callchain_hits;

	min_callchain_hits = total_samples * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n, min_callchain_hits);
	}
}
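
/*
 * Print the report: the "# Overhead ..." header line (with per-column
 * widths either taken from --column-widths or computed from the
 * data), a dotted underline unless a field separator is in use, and
 * then one line per hist_entry in output_hists order.
 */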
static size_t output__fprintf(FILE *fp, u64 total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned int width;
	char *col_width = col_width_list_str;

	fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	if (show_nr_samples) {
		if (field_sep)
			fprintf(fp, "%cSamples", *field_sep);
		else
			fputs(" Samples ", fp);
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (field_sep) {
			fprintf(fp, "%c%s", *field_sep, se->header);
			continue;
		}
		width = strlen(se->header);
		if (se->width) {
			if (col_width_list_str) {
				if (col_width) {
					*se->width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			width = *se->width = max(*se->width, width);
		}
		fprintf(fp, " %*s", width, se->header);
	}
	fprintf(fp, "\n");

	if (field_sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (show_nr_samples)
		fprintf(fp, " ..........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		if (se->width)
			width = *se->width;
		else
			width = strlen(se->header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");
	fprintf(fp, "#\n");

print_entries:
	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	if (sort_order == default_sort_order &&
	    parent_pattern == default_parent_pattern) {
		fprintf(fp, "#\n");
		fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
		fprintf(fp, "#\n");
	}
	fprintf(fp, "\n");

	return ret;
}

static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
	    thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}

static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0,
		     total_lost = 0;

static int validate_chain(struct ip_callchain *chain, event_t *event)
{
	unsigned int chain_size;

	chain_size = event->header.size;
	chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;

	if (chain->nr*sizeof(u64) > chain_size)
		return -1;

	return 0;
}
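
/*
 * Handle one PERF_EVENT_SAMPLE: pull the optional period and
 * callchain out of the variable-length tail, map the sample to
 * kernel/user/hypervisor context from header.misc, resolve the ip to
 * a symbol, apply the --dsos/--comms/--symbols filters and feed the
 * result into the histogram, weighted by the sample period.
 */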
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread = threads__findnew(event->ip.pid);
	u64 ip = event->ip.ip;
	u64 period = 1;
	struct map *map = NULL;
	void *more_data = event->ip.__more_data;
	struct ip_callchain *chain = NULL;
	int cpumode;

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid,
		(void *)(long)ip,
		(long long)period);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		unsigned int i;

		chain = (void *)more_data;

		dprintf("... chain: nr:%Lu\n", chain->nr);

		if (validate_chain(chain, event) < 0) {
			eprintf("call-chain problem with event, skipping it.\n");
			return 0;
		}

		if (dump_trace) {
			for (i = 0; i < chain->nr; i++)
				dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
		}
	}

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (comm_list && !strlist__has_entry(comm_list, thread->comm))
		return 0;

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';
		dso = kernel_dso;
		dprintf(" ...... dso: %s\n", dso->name);
	} else if (cpumode == PERF_EVENT_MISC_USER) {
		show = SHOW_USER;
		level = '.';
	} else {
		show = SHOW_HV;
		level = 'H';
		dso = hypervisor_dso;
		dprintf(" ...... dso: [hypervisor]\n");
	}

	if (show & show_mask) {
		struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);

		if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
			return 0;

		if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
			return 0;

		if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
			eprintf("problem incrementing symbol count, skipping event\n");
			return -1;
		}
	}
	total += period;

	return 0;
}

static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->fork.pid, event->fork.ppid);

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}

static int
process_period_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->period.time,
		event->period.id,
		event->period.sample_period);

	return 0;
}

static int
process_lost_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->lost.id,
		event->lost.lost);

	total_lost += event->lost.lost;

	return 0;
}
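
/*
 * With -D/--dump-raw-trace, dump each event as a hex + ASCII listing
 * (16 bytes per line) before it is decoded.
 */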
static void trace_event(event_t *event)
{
	unsigned char *raw_event = (void *)event;
	char *color = PERF_COLOR_BLUE;
	int i, j;

	if (!dump_trace)
		return;

	dprintf(".");
	cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);

	for (i = 0; i < event->header.size; i++) {
		if ((i & 15) == 0) {
			dprintf(".");
			cdprintf(" %04x: ", i);
		}

		cdprintf(" %02x", raw_event[i]);

		if (((i & 15) == 15) || i == event->header.size-1) {
			cdprintf(" ");
			for (j = 0; j < 15-(i & 15); j++)
				cdprintf(" ");
			for (j = 0; j < (i & 15); j++) {
				if (isprint(raw_event[i-15+j]))
					cdprintf("%c", raw_event[i-15+j]);
				else
					cdprintf(".");
			}
			cdprintf("\n");
		}
	}
	dprintf(".\n");
}

static int
process_read_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->read.pid,
		event->read.tid,
		event->read.value);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	trace_event(event);

	switch (event->header.type) {
	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);
	case PERF_EVENT_MMAP:
		return process_mmap_event(event, offset, head);
	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);
	case PERF_EVENT_FORK:
		return process_fork_event(event, offset, head);
	case PERF_EVENT_PERIOD:
		return process_period_event(event, offset, head);
	case PERF_EVENT_LOST:
		return process_lost_event(event, offset, head);
	case PERF_EVENT_READ:
		return process_read_event(event, offset, head);
	/*
	 * We don't process them right now but they are fine:
	 */
	case PERF_EVENT_THROTTLE:
	case PERF_EVENT_UNTHROTTLE:
		return 0;
	default:
		return -1;
	}

	return 0;
}

static struct perf_header *header;

static u64 perf_header__sample_type(void)
{
	u64 sample_type = 0;
	int i;

	for (i = 0; i < header->attrs; i++) {
		struct perf_header_attr *attr = header->attr[i];

		if (!sample_type)
			sample_type = attr->attr.sample_type;
		else if (sample_type != attr->attr.sample_type)
			die("non matching sample_type");
	}

	return sample_type;
}
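
/*
 * Main processing loop: mmap() the data file a window of
 * (page_size * mmap_window) bytes at a time, walk the events between
 * header->data_offset and data_offset + data_size, and remap the
 * window whenever the next event would cross its end. Unknown or
 * malformed events advance the cursor by 8 bytes in the hope of
 * re-synchronizing with the stream.
 */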
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head, shift;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, " (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	header = perf_header__read(input);
	head = header->data_offset;

	sample_type = perf_header__sample_type();

	if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (sort__has_parent) {
			fprintf(stderr, "selected --sort parent, but no"
					" callchain data. Did you call"
					" perf record without -g?\n");
			exit(-1);
		}
		if (callchain) {
			fprintf(stderr, "selected -c but no callchain data."
					" Did you call perf record without"
					" -g?\n");
			exit(-1);
		}
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		int ret;

		shift = page_size * (head / page_size);

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dprintf("\n%p [%p]: event: %d\n",
		(void *)(offset + head),
		(void *)(long)event->header.size,
		event->header.type);

	if (!size || process_event(event, offset, head) < 0) {
		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= header->data_offset + header->data_size)
		goto done;

	if (offset + head < (unsigned long)stat.st_size)
		goto more;

done:
	rc = EXIT_SUCCESS;
	close(input);

	dprintf(" IP events: %10ld\n", total);
	dprintf(" mmap events: %10ld\n", total_mmap);
	dprintf(" comm events: %10ld\n", total_comm);
	dprintf(" fork events: %10ld\n", total_fork);
	dprintf(" lost events: %10ld\n", total_lost);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort(total);
	output__fprintf(stdout, total);

	return rc;
}
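
/*
 * Parse the -c/--callchain argument, "output_type,min_percent", e.g.
 * "fractal,0.5" (the default) or "flat" / "graph". The mode selects
 * how chains are rendered and min_percent is handed to
 * register_callchain_param() as the pruning threshold.
 */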
static int
parse_callchain_opt(const struct option *opt __used, const char *arg,
		    int unset __used)
{
	char *tok;
	char *endptr;

	callchain = 1;

	if (!arg)
		return 0;

	tok = strtok((char *)arg, ",");
	if (!tok)
		return -1;

	/* get the output mode */
	if (!strncmp(tok, "graph", strlen(arg)))
		callchain_param.mode = CHAIN_GRAPH_ABS;
	else if (!strncmp(tok, "flat", strlen(arg)))
		callchain_param.mode = CHAIN_FLAT;
	else if (!strncmp(tok, "fractal", strlen(arg)))
		callchain_param.mode = CHAIN_GRAPH_REL;
	else
		return -1;

	/* get the min percentage */
	tok = strtok(NULL, ",");
	if (!tok)
		goto setup;

	callchain_param.min_percent = strtod(tok, &endptr);
	if (tok == endptr)
		return -1;

setup:
	if (register_callchain_param(&callchain_param) < 0) {
		fprintf(stderr, "Can't register callchain params\n");
		return -1;
	}
	return 0;
}

static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_BOOLEAN('m', "modules", &modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
	OPT_BOOLEAN('x', "exclude-other", &exclude_other,
		    "Only display entries with parent-match"),
	OPT_CALLBACK_DEFAULT('c', "callchain", NULL, "output_type,min_percent",
			     "Display callchains using output_type and min percent threshold. "
			     "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
	OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_STRING('w', "column-widths", &col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_STRING('t', "field-separator", &field_sep, "separator",
		   "separator for columns, no spaces will be added between "
		   "columns; '.' is reserved."),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(report_usage, options);
		}
	}

	free(str);
}
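
/*
 * Turn a -d/-C/-S comma-separated list into a strlist filter. When
 * the list has exactly one entry the corresponding sort column is
 * elided from the output and the value is noted in the report header
 * instead.
 */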
static void setup_list(struct strlist **list, const char *list_str,
		       struct sort_entry *se, const char *list_name,
		       FILE *fp)
{
	if (list_str) {
		*list = strlist__new(true, list_str);
		if (!*list) {
			fprintf(stderr, "problems parsing %s list\n",
				list_name);
			exit(129);
		}
		if (strlist__nr_entries(*list) == 1) {
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(*list, 0)->s);
			se->elide = true;
		}
	}
}

int cmd_report(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	if (parent_pattern != default_parent_pattern) {
		sort_dimension__add("parent");
		sort_parent.elide = 1;
	} else
		exclude_other = 0;

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_pager();

	setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
	setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
	setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);

	if (field_sep && *field_sep == '.') {
		fputs("'.' is the only invalid --field-separator argument\n",
		      stderr);
		exit(129);
	}

	return __cmd_report();
}