  1. /*
  2. * builtin-report.c
  3. *
  4. * Builtin report command: Analyze the perf.data input file,
  5. * look up and read DSOs and symbol information and display
  6. * a histogram of results, along various sorting keys.
  7. */
  8. #include "builtin.h"
  9. #include "util/util.h"
  10. #include "util/color.h"
  11. #include <linux/list.h>
  12. #include "util/cache.h"
  13. #include <linux/rbtree.h>
  14. #include "util/symbol.h"
  15. #include "util/string.h"
  16. #include "util/callchain.h"
  17. #include "util/strlist.h"
  18. #include "perf.h"
  19. #include "util/header.h"
  20. #include "util/parse-options.h"
  21. #include "util/parse-events.h"
  22. #define SHOW_KERNEL 1
  23. #define SHOW_USER 2
  24. #define SHOW_HV 4
  25. static char const *input_name = "perf.data";
  26. static char *vmlinux = NULL;
  27. static char default_sort_order[] = "comm,dso,symbol";
  28. static char *sort_order = default_sort_order;
  29. static char *dso_list_str, *comm_list_str, *sym_list_str,
  30. *col_width_list_str;
  31. static struct strlist *dso_list, *comm_list, *sym_list;
  32. static char *field_sep;
  33. static int input;
  34. static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
  35. static int dump_trace = 0;
  36. #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
  37. #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
  38. static int verbose;
  39. #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
  40. static int modules;
  41. static int full_paths;
  42. static int show_nr_samples;
  43. static unsigned long page_size;
  44. static unsigned long mmap_window = 32;
  45. static char default_parent_pattern[] = "^sys_|^do_page_fault";
  46. static char *parent_pattern = default_parent_pattern;
  47. static regex_t parent_regex;
  48. static int exclude_other = 1;
  49. static char callchain_default_opt[] = "fractal,0.5";
  50. static int callchain;
  51. static
  52. struct callchain_param callchain_param = {
  53. .mode = CHAIN_GRAPH_REL,
  54. .min_percent = 0.5
  55. };
  56. static u64 sample_type;
  57. struct ip_event {
  58. struct perf_event_header header;
  59. u64 ip;
  60. u32 pid, tid;
  61. unsigned char __more_data[];
  62. };
  63. struct mmap_event {
  64. struct perf_event_header header;
  65. u32 pid, tid;
  66. u64 start;
  67. u64 len;
  68. u64 pgoff;
  69. char filename[PATH_MAX];
  70. };
  71. struct comm_event {
  72. struct perf_event_header header;
  73. u32 pid, tid;
  74. char comm[16];
  75. };
  76. struct fork_event {
  77. struct perf_event_header header;
  78. u32 pid, ppid;
  79. u32 tid, ptid;
  80. };
  81. struct lost_event {
  82. struct perf_event_header header;
  83. u64 id;
  84. u64 lost;
  85. };
  86. struct read_event {
  87. struct perf_event_header header;
  88. u32 pid, tid;
  89. u64 value;
  90. u64 time_enabled;
  91. u64 time_running;
  92. u64 id;
  93. };
  94. typedef union event_union {
  95. struct perf_event_header header;
  96. struct ip_event ip;
  97. struct mmap_event mmap;
  98. struct comm_event comm;
  99. struct fork_event fork;
  100. struct lost_event lost;
  101. struct read_event read;
  102. } event_t;
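/*
 * fprintf() variant used for report output: when a --field-separator is
 * in effect, any occurrence of the separator character in the formatted
 * text is replaced with '.' so it cannot break the column layout.
 */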
  103. static int repsep_fprintf(FILE *fp, const char *fmt, ...)
  104. {
  105. int n;
  106. va_list ap;
  107. va_start(ap, fmt);
  108. if (!field_sep)
  109. n = vfprintf(fp, fmt, ap);
  110. else {
  111. char *bf = NULL;
  112. n = vasprintf(&bf, fmt, ap);
  113. if (n > 0) {
  114. char *sep = bf;
  115. while (1) {
  116. sep = strchr(sep, *field_sep);
  117. if (sep == NULL)
  118. break;
  119. *sep = '.';
  120. }
  121. }
  122. fputs(bf, fp);
  123. free(bf);
  124. }
  125. va_end(ap);
  126. return n;
  127. }
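/* All DSOs seen so far, plus the special kernel, vdso and hypervisor DSOs. */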
  128. static LIST_HEAD(dsos);
  129. static struct dso *kernel_dso;
  130. static struct dso *vdso;
  131. static struct dso *hypervisor_dso;
  132. static void dsos__add(struct dso *dso)
  133. {
  134. list_add_tail(&dso->node, &dsos);
  135. }
  136. static struct dso *dsos__find(const char *name)
  137. {
  138. struct dso *pos;
  139. list_for_each_entry(pos, &dsos, node)
  140. if (strcmp(pos->name, name) == 0)
  141. return pos;
  142. return NULL;
  143. }
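/* Find a DSO by name, creating it and loading its symbol table on first use. */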
  144. static struct dso *dsos__findnew(const char *name)
  145. {
  146. struct dso *dso = dsos__find(name);
  147. int nr;
  148. if (dso)
  149. return dso;
  150. dso = dso__new(name, 0);
  151. if (!dso)
  152. goto out_delete_dso;
  153. nr = dso__load(dso, NULL, verbose);
  154. if (nr < 0) {
  155. eprintf("Failed to open: %s\n", name);
  156. goto out_delete_dso;
  157. }
  158. if (!nr)
  159. eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
  160. dsos__add(dso);
  161. return dso;
  162. out_delete_dso:
  163. dso__delete(dso);
  164. return NULL;
  165. }
  166. static void dsos__fprintf(FILE *fp)
  167. {
  168. struct dso *pos;
  169. list_for_each_entry(pos, &dsos, node)
  170. dso__fprintf(pos, fp);
  171. }
  172. static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
  173. {
  174. return dso__find_symbol(dso, ip);
  175. }
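/*
 * Create the special [kernel], [vdso] and [hypervisor] DSOs and load the
 * kernel symbol table (plus module symbols when -m is given).
 */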
  176. static int load_kernel(void)
  177. {
  178. int err;
  179. kernel_dso = dso__new("[kernel]", 0);
  180. if (!kernel_dso)
  181. return -1;
  182. err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
  183. if (err <= 0) {
  184. dso__delete(kernel_dso);
  185. kernel_dso = NULL;
  186. } else
  187. dsos__add(kernel_dso);
  188. vdso = dso__new("[vdso]", 0);
  189. if (!vdso)
  190. return -1;
  191. vdso->find_symbol = vdso__find_symbol;
  192. dsos__add(vdso);
  193. hypervisor_dso = dso__new("[hypervisor]", 0);
  194. if (!hypervisor_dso)
  195. return -1;
  196. dsos__add(hypervisor_dso);
  197. return err;
  198. }
  199. static char __cwd[PATH_MAX];
  200. static char *cwd = __cwd;
  201. static int cwdlen;
  202. static int strcommon(const char *pathname)
  203. {
  204. int n = 0;
  205. while (n < cwdlen && pathname[n] == cwd[n])
  206. ++n;
  207. return n;
  208. }
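/*
 * One mmap'ed region of a thread's address space. map_ip() translates an
 * absolute sample address into the address to look up in the backing DSO
 * (an identity mapping for the vdso and for anonymous maps).
 */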
  209. struct map {
  210. struct list_head node;
  211. u64 start;
  212. u64 end;
  213. u64 pgoff;
  214. u64 (*map_ip)(struct map *, u64);
  215. struct dso *dso;
  216. };
  217. static u64 map__map_ip(struct map *map, u64 ip)
  218. {
  219. return ip - map->start + map->pgoff;
  220. }
  221. static u64 vdso__map_ip(struct map *map __used, u64 ip)
  222. {
  223. return ip;
  224. }
  225. static inline int is_anon_memory(const char *filename)
  226. {
  227. return strcmp(filename, "//anon") == 0;
  228. }
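/*
 * Build a map from a PERF_EVENT_MMAP record: shorten the filename relative
 * to the cwd, and point anonymous mappings at /tmp/perf-<pid>.map (the
 * convention used for JIT symbol maps).
 */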
  229. static struct map *map__new(struct mmap_event *event)
  230. {
  231. struct map *self = malloc(sizeof(*self));
  232. if (self != NULL) {
  233. const char *filename = event->filename;
  234. char newfilename[PATH_MAX];
  235. int anon;
  236. if (cwd) {
  237. int n = strcommon(filename);
  238. if (n == cwdlen) {
  239. snprintf(newfilename, sizeof(newfilename),
  240. ".%s", filename + n);
  241. filename = newfilename;
  242. }
  243. }
  244. anon = is_anon_memory(filename);
  245. if (anon) {
  246. snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
  247. filename = newfilename;
  248. }
  249. self->start = event->start;
  250. self->end = event->start + event->len;
  251. self->pgoff = event->pgoff;
  252. self->dso = dsos__findnew(filename);
  253. if (self->dso == NULL)
  254. goto out_delete;
  255. if (self->dso == vdso || anon)
  256. self->map_ip = vdso__map_ip;
  257. else
  258. self->map_ip = map__map_ip;
  259. }
  260. return self;
  261. out_delete:
  262. free(self);
  263. return NULL;
  264. }
  265. static struct map *map__clone(struct map *self)
  266. {
  267. struct map *map = malloc(sizeof(*self));
  268. if (!map)
  269. return NULL;
  270. memcpy(map, self, sizeof(*self));
  271. return map;
  272. }
  273. static int map__overlap(struct map *l, struct map *r)
  274. {
  275. if (l->start > r->start) {
  276. struct map *t = l;
  277. l = r;
  278. r = t;
  279. }
  280. if (l->end > r->start)
  281. return 1;
  282. return 0;
  283. }
  284. static size_t map__fprintf(struct map *self, FILE *fp)
  285. {
  286. return fprintf(fp, " %Lx-%Lx %Lx %s\n",
  287. self->start, self->end, self->pgoff, self->dso->name);
  288. }
  289. struct thread {
  290. struct rb_node rb_node;
  291. struct list_head maps;
  292. pid_t pid;
  293. char *comm;
  294. };
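/* Allocate a thread; its comm is ":<pid>" until a COMM event names it. */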
  295. static struct thread *thread__new(pid_t pid)
  296. {
  297. struct thread *self = malloc(sizeof(*self));
  298. if (self != NULL) {
  299. self->pid = pid;
  300. self->comm = malloc(32);
  301. if (self->comm)
  302. snprintf(self->comm, 32, ":%d", self->pid);
  303. INIT_LIST_HEAD(&self->maps);
  304. }
  305. return self;
  306. }
  307. static unsigned int dsos__col_width,
  308. comms__col_width,
  309. threads__col_width;
  310. static int thread__set_comm(struct thread *self, const char *comm)
  311. {
  312. if (self->comm)
  313. free(self->comm);
  314. self->comm = strdup(comm);
  315. if (!self->comm)
  316. return -ENOMEM;
  317. if (!col_width_list_str && !field_sep &&
  318. (!comm_list || strlist__has_entry(comm_list, comm))) {
  319. unsigned int slen = strlen(comm);
  320. if (slen > comms__col_width) {
  321. comms__col_width = slen;
  322. threads__col_width = slen + 6;
  323. }
  324. }
  325. return 0;
  326. }
  327. static size_t thread__fprintf(struct thread *self, FILE *fp)
  328. {
  329. struct map *pos;
  330. size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
  331. list_for_each_entry(pos, &self->maps, node)
  332. ret += map__fprintf(pos, fp);
  333. return ret;
  334. }
  335. static struct rb_root threads;
  336. static struct thread *last_match;
  337. static struct thread *threads__findnew(pid_t pid)
  338. {
  339. struct rb_node **p = &threads.rb_node;
  340. struct rb_node *parent = NULL;
  341. struct thread *th;
  342. /*
  343. * Front-end cache - PID lookups come in blocks,
  344. * so most of the time we don't have to look up
  345. * the full rbtree:
  346. */
  347. if (last_match && last_match->pid == pid)
  348. return last_match;
  349. while (*p != NULL) {
  350. parent = *p;
  351. th = rb_entry(parent, struct thread, rb_node);
  352. if (th->pid == pid) {
  353. last_match = th;
  354. return th;
  355. }
  356. if (pid < th->pid)
  357. p = &(*p)->rb_left;
  358. else
  359. p = &(*p)->rb_right;
  360. }
  361. th = thread__new(pid);
  362. if (th != NULL) {
  363. rb_link_node(&th->rb_node, parent, p);
  364. rb_insert_color(&th->rb_node, &threads);
  365. last_match = th;
  366. }
  367. return th;
  368. }
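/*
 * Insert a map into a thread, trimming or dropping any existing maps that
 * overlap the new one so address lookups stay unambiguous.
 */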
  369. static void thread__insert_map(struct thread *self, struct map *map)
  370. {
  371. struct map *pos, *tmp;
  372. list_for_each_entry_safe(pos, tmp, &self->maps, node) {
  373. if (map__overlap(pos, map)) {
  374. if (verbose >= 2) {
  375. printf("overlapping maps:\n");
  376. map__fprintf(map, stdout);
  377. map__fprintf(pos, stdout);
  378. }
  379. if (map->start <= pos->start && map->end > pos->start)
  380. pos->start = map->end;
  381. if (map->end >= pos->end && map->start < pos->end)
  382. pos->end = map->start;
  383. if (verbose >= 2) {
  384. printf("after collision:\n");
  385. map__fprintf(pos, stdout);
  386. }
  387. if (pos->start >= pos->end) {
  388. list_del_init(&pos->node);
  389. free(pos);
  390. }
  391. }
  392. }
  393. list_add_tail(&map->node, &self->maps);
  394. }
  395. static int thread__fork(struct thread *self, struct thread *parent)
  396. {
  397. struct map *map;
  398. if (self->comm)
  399. free(self->comm);
  400. self->comm = strdup(parent->comm);
  401. if (!self->comm)
  402. return -ENOMEM;
  403. list_for_each_entry(map, &parent->maps, node) {
  404. struct map *new = map__clone(map);
  405. if (!new)
  406. return -ENOMEM;
  407. thread__insert_map(self, new);
  408. }
  409. return 0;
  410. }
  411. static struct map *thread__find_map(struct thread *self, u64 ip)
  412. {
  413. struct map *pos;
  414. if (self == NULL)
  415. return NULL;
  416. list_for_each_entry(pos, &self->maps, node)
  417. if (ip >= pos->start && ip <= pos->end)
  418. return pos;
  419. return NULL;
  420. }
  421. static size_t threads__fprintf(FILE *fp)
  422. {
  423. size_t ret = 0;
  424. struct rb_node *nd;
  425. for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
  426. struct thread *pos = rb_entry(nd, struct thread, rb_node);
  427. ret += thread__fprintf(pos, fp);
  428. }
  429. return ret;
  430. }
  431. /*
  432. * histogram, sorted on item, collects counts
  433. */
  434. static struct rb_root hist;
  435. struct hist_entry {
  436. struct rb_node rb_node;
  437. struct thread *thread;
  438. struct map *map;
  439. struct dso *dso;
  440. struct symbol *sym;
  441. struct symbol *parent;
  442. u64 ip;
  443. char level;
  444. struct callchain_node callchain;
  445. struct rb_root sorted_chain;
  446. u64 count;
  447. };
  448. /*
  449. * configurable sorting bits
  450. */
  451. struct sort_entry {
  452. struct list_head list;
  453. char *header;
  454. int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
  455. int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
  456. size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
  457. unsigned int *width;
  458. bool elide;
  459. };
  460. static int64_t cmp_null(void *l, void *r)
  461. {
  462. if (!l && !r)
  463. return 0;
  464. else if (!l)
  465. return -1;
  466. else
  467. return 1;
  468. }
  469. /* --sort pid */
  470. static int64_t
  471. sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
  472. {
  473. return right->thread->pid - left->thread->pid;
  474. }
  475. static size_t
  476. sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
  477. {
  478. return repsep_fprintf(fp, "%*s:%5d", width - 6,
  479. self->thread->comm ?: "", self->thread->pid);
  480. }
  481. static struct sort_entry sort_thread = {
  482. .header = "Command: Pid",
  483. .cmp = sort__thread_cmp,
  484. .print = sort__thread_print,
  485. .width = &threads__col_width,
  486. };
  487. /* --sort comm */
  488. static int64_t
  489. sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
  490. {
  491. return right->thread->pid - left->thread->pid;
  492. }
  493. static int64_t
  494. sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
  495. {
  496. char *comm_l = left->thread->comm;
  497. char *comm_r = right->thread->comm;
  498. if (!comm_l || !comm_r)
  499. return cmp_null(comm_l, comm_r);
  500. return strcmp(comm_l, comm_r);
  501. }
  502. static size_t
  503. sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
  504. {
  505. return repsep_fprintf(fp, "%*s", width, self->thread->comm);
  506. }
  507. static struct sort_entry sort_comm = {
  508. .header = "Command",
  509. .cmp = sort__comm_cmp,
  510. .collapse = sort__comm_collapse,
  511. .print = sort__comm_print,
  512. .width = &comms__col_width,
  513. };
  514. /* --sort dso */
  515. static int64_t
  516. sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
  517. {
  518. struct dso *dso_l = left->dso;
  519. struct dso *dso_r = right->dso;
  520. if (!dso_l || !dso_r)
  521. return cmp_null(dso_l, dso_r);
  522. return strcmp(dso_l->name, dso_r->name);
  523. }
  524. static size_t
  525. sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
  526. {
  527. if (self->dso)
  528. return repsep_fprintf(fp, "%-*s", width, self->dso->name);
  529. return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
  530. }
  531. static struct sort_entry sort_dso = {
  532. .header = "Shared Object",
  533. .cmp = sort__dso_cmp,
  534. .print = sort__dso_print,
  535. .width = &dsos__col_width,
  536. };
  537. /* --sort symbol */
  538. static int64_t
  539. sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
  540. {
  541. u64 ip_l, ip_r;
  542. if (left->sym == right->sym)
  543. return 0;
  544. ip_l = left->sym ? left->sym->start : left->ip;
  545. ip_r = right->sym ? right->sym->start : right->ip;
  546. return (int64_t)(ip_r - ip_l);
  547. }
  548. static size_t
  549. sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
  550. {
  551. size_t ret = 0;
  552. if (verbose)
  553. ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip,
  554. dso__symtab_origin(self->dso));
  555. ret += repsep_fprintf(fp, "[%c] ", self->level);
  556. if (self->sym) {
  557. ret += repsep_fprintf(fp, "%s", self->sym->name);
  558. if (self->sym->module)
  559. ret += repsep_fprintf(fp, "\t[%s]",
  560. self->sym->module->name);
  561. } else {
  562. ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
  563. }
  564. return ret;
  565. }
  566. static struct sort_entry sort_sym = {
  567. .header = "Symbol",
  568. .cmp = sort__sym_cmp,
  569. .print = sort__sym_print,
  570. };
  571. /* --sort parent */
  572. static int64_t
  573. sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
  574. {
  575. struct symbol *sym_l = left->parent;
  576. struct symbol *sym_r = right->parent;
  577. if (!sym_l || !sym_r)
  578. return cmp_null(sym_l, sym_r);
  579. return strcmp(sym_l->name, sym_r->name);
  580. }
  581. static size_t
  582. sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
  583. {
  584. return repsep_fprintf(fp, "%-*s", width,
  585. self->parent ? self->parent->name : "[other]");
  586. }
  587. static unsigned int parent_symbol__col_width;
  588. static struct sort_entry sort_parent = {
  589. .header = "Parent symbol",
  590. .cmp = sort__parent_cmp,
  591. .print = sort__parent_print,
  592. .width = &parent_symbol__col_width,
  593. };
  594. static int sort__need_collapse = 0;
  595. static int sort__has_parent = 0;
  596. struct sort_dimension {
  597. char *name;
  598. struct sort_entry *entry;
  599. int taken;
  600. };
  601. static struct sort_dimension sort_dimensions[] = {
  602. { .name = "pid", .entry = &sort_thread, },
  603. { .name = "comm", .entry = &sort_comm, },
  604. { .name = "dso", .entry = &sort_dso, },
  605. { .name = "symbol", .entry = &sort_sym, },
  606. { .name = "parent", .entry = &sort_parent, },
  607. };
  608. static LIST_HEAD(hist_entry__sort_list);
  609. static int sort_dimension__add(char *tok)
  610. {
  611. unsigned int i;
  612. for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
  613. struct sort_dimension *sd = &sort_dimensions[i];
  614. if (sd->taken)
  615. continue;
  616. if (strncasecmp(tok, sd->name, strlen(tok)))
  617. continue;
  618. if (sd->entry->collapse)
  619. sort__need_collapse = 1;
  620. if (sd->entry == &sort_parent) {
  621. int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
  622. if (ret) {
  623. char err[BUFSIZ];
  624. regerror(ret, &parent_regex, err, sizeof(err));
  625. fprintf(stderr, "Invalid regex: %s\n%s",
  626. parent_pattern, err);
  627. exit(-1);
  628. }
  629. sort__has_parent = 1;
  630. }
  631. list_add_tail(&sd->entry->list, &hist_entry__sort_list);
  632. sd->taken = 1;
  633. return 0;
  634. }
  635. return -ESRCH;
  636. }
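/*
 * Compare two hist entries with each configured sort key in turn; the
 * first key that differs decides the ordering.
 */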
  637. static int64_t
  638. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  639. {
  640. struct sort_entry *se;
  641. int64_t cmp = 0;
  642. list_for_each_entry(se, &hist_entry__sort_list, list) {
  643. cmp = se->cmp(left, right);
  644. if (cmp)
  645. break;
  646. }
  647. return cmp;
  648. }
  649. static int64_t
  650. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  651. {
  652. struct sort_entry *se;
  653. int64_t cmp = 0;
  654. list_for_each_entry(se, &hist_entry__sort_list, list) {
  655. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  656. f = se->collapse ?: se->cmp;
  657. cmp = f(left, right);
  658. if (cmp)
  659. break;
  660. }
  661. return cmp;
  662. }
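/*
 * Graph-mode callchain output helpers: print one '|' per still-open depth
 * level, then the branch percentage and the symbol (or raw address) of
 * each chain entry.
 */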
  663. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
  664. {
  665. int i;
  666. size_t ret = 0;
  667. ret += fprintf(fp, "%s", " ");
  668. for (i = 0; i < depth; i++)
  669. if (depth_mask & (1 << i))
  670. ret += fprintf(fp, "| ");
  671. else
  672. ret += fprintf(fp, " ");
  673. ret += fprintf(fp, "\n");
  674. return ret;
  675. }
  676. static size_t
  677. ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
  678. int depth_mask, int count, u64 total_samples,
  679. int hits)
  680. {
  681. int i;
  682. size_t ret = 0;
  683. ret += fprintf(fp, "%s", " ");
  684. for (i = 0; i < depth; i++) {
  685. if (depth_mask & (1 << i))
  686. ret += fprintf(fp, "|");
  687. else
  688. ret += fprintf(fp, " ");
  689. if (!count && i == depth - 1) {
  690. double percent;
  691. percent = hits * 100.0 / total_samples;
  692. ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
  693. } else
  694. ret += fprintf(fp, "%s", " ");
  695. }
  696. if (chain->sym)
  697. ret += fprintf(fp, "%s\n", chain->sym->name);
  698. else
  699. ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
  700. return ret;
  701. }
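/*
 * Synthetic "[...]" entry used to account for callchain hits that fall
 * below the minimum percentage and are not printed individually.
 */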
  702. static struct symbol *rem_sq_bracket;
  703. static struct callchain_list rem_hits;
  704. static void init_rem_hits(void)
  705. {
  706. rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
  707. if (!rem_sq_bracket) {
  708. fprintf(stderr, "Not enough memory to display remaining hits\n");
  709. return;
  710. }
  711. strcpy(rem_sq_bracket->name, "[...]");
  712. rem_hits.sym = rem_sq_bracket;
  713. }
  714. static size_t
  715. callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
  716. u64 total_samples, int depth, int depth_mask)
  717. {
  718. struct rb_node *node, *next;
  719. struct callchain_node *child;
  720. struct callchain_list *chain;
  721. int new_depth_mask = depth_mask;
  722. u64 new_total;
  723. u64 remaining;
  724. size_t ret = 0;
  725. int i;
  726. if (callchain_param.mode == CHAIN_GRAPH_REL)
  727. new_total = self->children_hit;
  728. else
  729. new_total = total_samples;
  730. remaining = new_total;
  731. node = rb_first(&self->rb_root);
  732. while (node) {
  733. u64 cumul;
  734. child = rb_entry(node, struct callchain_node, rb_node);
  735. cumul = cumul_hits(child);
  736. remaining -= cumul;
  737. /*
  738. * The depth mask manages the output of pipes that show
  739. * the depth. We don't want to keep the pipes of the current
  740. * level for the last child of this depth.
  741. * Except if we have remaining filtered hits. They will
  742. * supersede the last child
  743. */
  744. next = rb_next(node);
  745. if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
  746. new_depth_mask &= ~(1 << (depth - 1));
  747. /*
  748. * But we keep the older depth mask for the line separator
  749. * to keep the level link until we reach the last child
  750. */
  751. ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
  752. i = 0;
  753. list_for_each_entry(chain, &child->val, list) {
  754. if (chain->ip >= PERF_CONTEXT_MAX)
  755. continue;
  756. ret += ipchain__fprintf_graph(fp, chain, depth,
  757. new_depth_mask, i++,
  758. new_total,
  759. cumul);
  760. }
  761. ret += callchain__fprintf_graph(fp, child, new_total,
  762. depth + 1,
  763. new_depth_mask | (1 << depth));
  764. node = next;
  765. }
  766. if (callchain_param.mode == CHAIN_GRAPH_REL &&
  767. remaining && remaining != new_total) {
  768. if (!rem_sq_bracket)
  769. return ret;
  770. new_depth_mask &= ~(1 << (depth - 1));
  771. ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
  772. new_depth_mask, 0, new_total,
  773. remaining);
  774. }
  775. return ret;
  776. }
  777. static size_t
  778. callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
  779. u64 total_samples)
  780. {
  781. struct callchain_list *chain;
  782. size_t ret = 0;
  783. if (!self)
  784. return 0;
  785. ret += callchain__fprintf_flat(fp, self->parent, total_samples);
  786. list_for_each_entry(chain, &self->val, list) {
  787. if (chain->ip >= PERF_CONTEXT_MAX)
  788. continue;
  789. if (chain->sym)
  790. ret += fprintf(fp, " %s\n", chain->sym->name);
  791. else
  792. ret += fprintf(fp, " %p\n",
  793. (void *)(long)chain->ip);
  794. }
  795. return ret;
  796. }
  797. static size_t
  798. hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
  799. u64 total_samples)
  800. {
  801. struct rb_node *rb_node;
  802. struct callchain_node *chain;
  803. size_t ret = 0;
  804. rb_node = rb_first(&self->sorted_chain);
  805. while (rb_node) {
  806. double percent;
  807. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  808. percent = chain->hit * 100.0 / total_samples;
  809. switch (callchain_param.mode) {
  810. case CHAIN_FLAT:
  811. ret += percent_color_fprintf(fp, " %6.2f%%\n",
  812. percent);
  813. ret += callchain__fprintf_flat(fp, chain, total_samples);
  814. break;
  815. case CHAIN_GRAPH_ABS: /* fall through */
  816. case CHAIN_GRAPH_REL:
  817. ret += callchain__fprintf_graph(fp, chain,
  818. total_samples, 1, 1);
  819. default:
  820. break;
  821. }
  822. ret += fprintf(fp, "\n");
  823. rb_node = rb_next(rb_node);
  824. }
  825. return ret;
  826. }
  827. static size_t
  828. hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
  829. {
  830. struct sort_entry *se;
  831. size_t ret;
  832. if (exclude_other && !self->parent)
  833. return 0;
  834. if (total_samples)
  835. ret = percent_color_fprintf(fp,
  836. field_sep ? "%.2f" : " %6.2f%%",
  837. (self->count * 100.0) / total_samples);
  838. else
  839. ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
  840. if (show_nr_samples) {
  841. if (field_sep)
  842. fprintf(fp, "%c%lld", *field_sep, self->count);
  843. else
  844. fprintf(fp, "%11lld", self->count);
  845. }
  846. list_for_each_entry(se, &hist_entry__sort_list, list) {
  847. if (se->elide)
  848. continue;
  849. fprintf(fp, "%s", field_sep ?: " ");
  850. ret += se->print(fp, self, se->width ? *se->width : 0);
  851. }
  852. ret += fprintf(fp, "\n");
  853. if (callchain)
  854. hist_entry_callchain__fprintf(fp, self, total_samples);
  855. return ret;
  856. }
  857. /*
  858. * Track the widest DSO name seen, so the "Shared Object" column can be sized to fit.
  859. */
  860. static void dso__calc_col_width(struct dso *self)
  861. {
  862. if (!col_width_list_str && !field_sep &&
  863. (!dso_list || strlist__has_entry(dso_list, self->name))) {
  864. unsigned int slen = strlen(self->name);
  865. if (slen > dsos__col_width)
  866. dsos__col_width = slen;
  867. }
  868. self->slen_calculated = 1;
  869. }
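/*
 * Resolve a sample address: find the thread's map containing it, translate
 * it into a DSO-relative address and look up the symbol there. Negative
 * addresses outside any map are tried against the kernel DSO (vsyscalls).
 */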
  870. static struct symbol *
  871. resolve_symbol(struct thread *thread, struct map **mapp,
  872. struct dso **dsop, u64 *ipp)
  873. {
  874. struct dso *dso = dsop ? *dsop : NULL;
  875. struct map *map = mapp ? *mapp : NULL;
  876. u64 ip = *ipp;
  877. if (!thread)
  878. return NULL;
  879. if (dso)
  880. goto got_dso;
  881. if (map)
  882. goto got_map;
  883. map = thread__find_map(thread, ip);
  884. if (map != NULL) {
  885. /*
  886. * We have to do this here as we may have a dso
  887. * with no symbol hit that has a name longer than
  888. * the ones with symbols sampled.
  889. */
  890. if (!sort_dso.elide && !map->dso->slen_calculated)
  891. dso__calc_col_width(map->dso);
  892. if (mapp)
  893. *mapp = map;
  894. got_map:
  895. ip = map->map_ip(map, ip);
  896. dso = map->dso;
  897. } else {
  898. /*
  899. * If this is outside of all known maps,
  900. * and is a negative address, try to look it
  901. * up in the kernel dso, as it might be a
  902. * vsyscall (which executes in user-mode):
  903. */
  904. if ((long long)ip < 0)
  905. dso = kernel_dso;
  906. }
  907. dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
  908. dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
  909. *ipp = ip;
  910. if (dsop)
  911. *dsop = dso;
  912. if (!dso)
  913. return NULL;
  914. got_dso:
  915. return dso->find_symbol(dso, ip);
  916. }
  917. static int call__match(struct symbol *sym)
  918. {
  919. if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
  920. return 1;
  921. return 0;
  922. }
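/*
 * Walk a sampled callchain: resolve every entry to a symbol, remember the
 * first one matching the --parent regex and, when callchain output is
 * enabled, collect the resolved symbols for later accounting.
 */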
  923. static struct symbol **
  924. resolve_callchain(struct thread *thread, struct map *map __used,
  925. struct ip_callchain *chain, struct hist_entry *entry)
  926. {
  927. u64 context = PERF_CONTEXT_MAX;
  928. struct symbol **syms = NULL;
  929. unsigned int i;
  930. if (callchain) {
  931. syms = calloc(chain->nr, sizeof(*syms));
  932. if (!syms) {
  933. fprintf(stderr, "Can't allocate memory for symbols\n");
  934. exit(-1);
  935. }
  936. }
  937. for (i = 0; i < chain->nr; i++) {
  938. u64 ip = chain->ips[i];
  939. struct dso *dso = NULL;
  940. struct symbol *sym;
  941. if (ip >= PERF_CONTEXT_MAX) {
  942. context = ip;
  943. continue;
  944. }
  945. switch (context) {
  946. case PERF_CONTEXT_HV:
  947. dso = hypervisor_dso;
  948. break;
  949. case PERF_CONTEXT_KERNEL:
  950. dso = kernel_dso;
  951. break;
  952. default:
  953. break;
  954. }
  955. sym = resolve_symbol(thread, NULL, &dso, &ip);
  956. if (sym) {
  957. if (sort__has_parent && call__match(sym) &&
  958. !entry->parent)
  959. entry->parent = sym;
  960. if (!callchain)
  961. break;
  962. syms[i] = sym;
  963. }
  964. }
  965. return syms;
  966. }
  967. /*
  968. * collect histogram counts
  969. */
  970. static int
  971. hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
  972. struct symbol *sym, u64 ip, struct ip_callchain *chain,
  973. char level, u64 count)
  974. {
  975. struct rb_node **p = &hist.rb_node;
  976. struct rb_node *parent = NULL;
  977. struct hist_entry *he;
  978. struct symbol **syms = NULL;
  979. struct hist_entry entry = {
  980. .thread = thread,
  981. .map = map,
  982. .dso = dso,
  983. .sym = sym,
  984. .ip = ip,
  985. .level = level,
  986. .count = count,
  987. .parent = NULL,
  988. .sorted_chain = RB_ROOT
  989. };
  990. int cmp;
  991. if ((sort__has_parent || callchain) && chain)
  992. syms = resolve_callchain(thread, map, chain, &entry);
  993. while (*p != NULL) {
  994. parent = *p;
  995. he = rb_entry(parent, struct hist_entry, rb_node);
  996. cmp = hist_entry__cmp(&entry, he);
  997. if (!cmp) {
  998. he->count += count;
  999. if (callchain) {
  1000. append_chain(&he->callchain, chain, syms);
  1001. free(syms);
  1002. }
  1003. return 0;
  1004. }
  1005. if (cmp < 0)
  1006. p = &(*p)->rb_left;
  1007. else
  1008. p = &(*p)->rb_right;
  1009. }
  1010. he = malloc(sizeof(*he));
  1011. if (!he)
  1012. return -ENOMEM;
  1013. *he = entry;
  1014. if (callchain) {
  1015. callchain_init(&he->callchain);
  1016. append_chain(&he->callchain, chain, syms);
  1017. free(syms);
  1018. }
  1019. rb_link_node(&he->rb_node, parent, p);
  1020. rb_insert_color(&he->rb_node, &hist);
  1021. return 0;
  1022. }
  1023. static void hist_entry__free(struct hist_entry *he)
  1024. {
  1025. free(he);
  1026. }
  1027. /*
  1028. * collapse the histogram
  1029. */
  1030. static struct rb_root collapse_hists;
  1031. static void collapse__insert_entry(struct hist_entry *he)
  1032. {
  1033. struct rb_node **p = &collapse_hists.rb_node;
  1034. struct rb_node *parent = NULL;
  1035. struct hist_entry *iter;
  1036. int64_t cmp;
  1037. while (*p != NULL) {
  1038. parent = *p;
  1039. iter = rb_entry(parent, struct hist_entry, rb_node);
  1040. cmp = hist_entry__collapse(iter, he);
  1041. if (!cmp) {
  1042. iter->count += he->count;
  1043. hist_entry__free(he);
  1044. return;
  1045. }
  1046. if (cmp < 0)
  1047. p = &(*p)->rb_left;
  1048. else
  1049. p = &(*p)->rb_right;
  1050. }
  1051. rb_link_node(&he->rb_node, parent, p);
  1052. rb_insert_color(&he->rb_node, &collapse_hists);
  1053. }
  1054. static void collapse__resort(void)
  1055. {
  1056. struct rb_node *next;
  1057. struct hist_entry *n;
  1058. if (!sort__need_collapse)
  1059. return;
  1060. next = rb_first(&hist);
  1061. while (next) {
  1062. n = rb_entry(next, struct hist_entry, rb_node);
  1063. next = rb_next(&n->rb_node);
  1064. rb_erase(&n->rb_node, &hist);
  1065. collapse__insert_entry(n);
  1066. }
  1067. }
  1068. /*
  1069. * reverse the map, sort on count.
  1070. */
  1071. static struct rb_root output_hists;
  1072. static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
  1073. {
  1074. struct rb_node **p = &output_hists.rb_node;
  1075. struct rb_node *parent = NULL;
  1076. struct hist_entry *iter;
  1077. if (callchain)
  1078. callchain_param.sort(&he->sorted_chain, &he->callchain,
  1079. min_callchain_hits, &callchain_param);
  1080. while (*p != NULL) {
  1081. parent = *p;
  1082. iter = rb_entry(parent, struct hist_entry, rb_node);
  1083. if (he->count > iter->count)
  1084. p = &(*p)->rb_left;
  1085. else
  1086. p = &(*p)->rb_right;
  1087. }
  1088. rb_link_node(&he->rb_node, parent, p);
  1089. rb_insert_color(&he->rb_node, &output_hists);
  1090. }
  1091. static void output__resort(u64 total_samples)
  1092. {
  1093. struct rb_node *next;
  1094. struct hist_entry *n;
  1095. struct rb_root *tree = &hist;
  1096. u64 min_callchain_hits;
  1097. min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
  1098. if (sort__need_collapse)
  1099. tree = &collapse_hists;
  1100. next = rb_first(tree);
  1101. while (next) {
  1102. n = rb_entry(next, struct hist_entry, rb_node);
  1103. next = rb_next(&n->rb_node);
  1104. rb_erase(&n->rb_node, tree);
  1105. output__insert_entry(n, min_callchain_hits);
  1106. }
  1107. }
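/*
 * Print the final report: the "# Overhead" header line, per-column rulers,
 * then one line (plus optional callchain) per histogram entry.
 */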
  1108. static size_t output__fprintf(FILE *fp, u64 total_samples)
  1109. {
  1110. struct hist_entry *pos;
  1111. struct sort_entry *se;
  1112. struct rb_node *nd;
  1113. size_t ret = 0;
  1114. unsigned int width;
  1115. char *col_width = col_width_list_str;
  1116. init_rem_hits();
  1117. fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
  1118. fprintf(fp, "#\n");
  1119. fprintf(fp, "# Overhead");
  1120. if (show_nr_samples) {
  1121. if (field_sep)
  1122. fprintf(fp, "%cSamples", *field_sep);
  1123. else
  1124. fputs(" Samples ", fp);
  1125. }
  1126. list_for_each_entry(se, &hist_entry__sort_list, list) {
  1127. if (se->elide)
  1128. continue;
  1129. if (field_sep) {
  1130. fprintf(fp, "%c%s", *field_sep, se->header);
  1131. continue;
  1132. }
  1133. width = strlen(se->header);
  1134. if (se->width) {
  1135. if (col_width_list_str) {
  1136. if (col_width) {
  1137. *se->width = atoi(col_width);
  1138. col_width = strchr(col_width, ',');
  1139. if (col_width)
  1140. ++col_width;
  1141. }
  1142. }
  1143. width = *se->width = max(*se->width, width);
  1144. }
  1145. fprintf(fp, " %*s", width, se->header);
  1146. }
  1147. fprintf(fp, "\n");
  1148. if (field_sep)
  1149. goto print_entries;
  1150. fprintf(fp, "# ........");
  1151. if (show_nr_samples)
  1152. fprintf(fp, " ..........");
  1153. list_for_each_entry(se, &hist_entry__sort_list, list) {
  1154. unsigned int i;
  1155. if (se->elide)
  1156. continue;
  1157. fprintf(fp, " ");
  1158. if (se->width)
  1159. width = *se->width;
  1160. else
  1161. width = strlen(se->header);
  1162. for (i = 0; i < width; i++)
  1163. fprintf(fp, ".");
  1164. }
  1165. fprintf(fp, "\n");
  1166. fprintf(fp, "#\n");
  1167. print_entries:
  1168. for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
  1169. pos = rb_entry(nd, struct hist_entry, rb_node);
  1170. ret += hist_entry__fprintf(fp, pos, total_samples);
  1171. }
  1172. if (sort_order == default_sort_order &&
  1173. parent_pattern == default_parent_pattern) {
  1174. fprintf(fp, "#\n");
  1175. fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
  1176. fprintf(fp, "#\n");
  1177. }
  1178. fprintf(fp, "\n");
  1179. free(rem_sq_bracket);
  1180. return ret;
  1181. }
  1182. static void register_idle_thread(void)
  1183. {
  1184. struct thread *thread = threads__findnew(0);
  1185. if (thread == NULL ||
  1186. thread__set_comm(thread, "[idle]")) {
  1187. fprintf(stderr, "problem inserting idle task.\n");
  1188. exit(-1);
  1189. }
  1190. }
  1191. static unsigned long total = 0,
  1192. total_mmap = 0,
  1193. total_comm = 0,
  1194. total_fork = 0,
  1195. total_unknown = 0,
  1196. total_lost = 0;
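/*
 * Reject callchains that claim more entries than the event record can
 * actually hold.
 */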
  1197. static int validate_chain(struct ip_callchain *chain, event_t *event)
  1198. {
  1199. unsigned int chain_size;
  1200. chain_size = event->header.size;
  1201. chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
  1202. if (chain->nr*sizeof(u64) > chain_size)
  1203. return -1;
  1204. return 0;
  1205. }
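/*
 * Handle PERF_EVENT_SAMPLE: resolve the sampled ip (and callchain) to
 * thread/map/dso/symbol, apply the --dsos/--comms/--symbols filters and
 * add the sample period to the histogram.
 */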
  1206. static int
  1207. process_sample_event(event_t *event, unsigned long offset, unsigned long head)
  1208. {
  1209. char level;
  1210. int show = 0;
  1211. struct dso *dso = NULL;
  1212. struct thread *thread = threads__findnew(event->ip.pid);
  1213. u64 ip = event->ip.ip;
  1214. u64 period = 1;
  1215. struct map *map = NULL;
  1216. void *more_data = event->ip.__more_data;
  1217. struct ip_callchain *chain = NULL;
  1218. int cpumode;
  1219. if (sample_type & PERF_SAMPLE_PERIOD) {
  1220. period = *(u64 *)more_data;
  1221. more_data += sizeof(u64);
  1222. }
  1223. dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
  1224. (void *)(offset + head),
  1225. (void *)(long)(event->header.size),
  1226. event->header.misc,
  1227. event->ip.pid,
  1228. (void *)(long)ip,
  1229. (long long)period);
  1230. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  1231. unsigned int i;
  1232. chain = (void *)more_data;
  1233. dprintf("... chain: nr:%Lu\n", chain->nr);
  1234. if (validate_chain(chain, event) < 0) {
  1235. eprintf("call-chain problem with event, skipping it.\n");
  1236. return 0;
  1237. }
  1238. if (dump_trace) {
  1239. for (i = 0; i < chain->nr; i++)
  1240. dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
  1241. }
  1242. }
  1243. dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
  1244. if (thread == NULL) {
  1245. eprintf("problem processing %d event, skipping it.\n",
  1246. event->header.type);
  1247. return -1;
  1248. }
  1249. if (comm_list && !strlist__has_entry(comm_list, thread->comm))
  1250. return 0;
  1251. cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
  1252. if (cpumode == PERF_EVENT_MISC_KERNEL) {
  1253. show = SHOW_KERNEL;
  1254. level = 'k';
  1255. dso = kernel_dso;
  1256. dprintf(" ...... dso: %s\n", dso->name);
  1257. } else if (cpumode == PERF_EVENT_MISC_USER) {
  1258. show = SHOW_USER;
  1259. level = '.';
  1260. } else {
  1261. show = SHOW_HV;
  1262. level = 'H';
  1263. dso = hypervisor_dso;
  1264. dprintf(" ...... dso: [hypervisor]\n");
  1265. }
  1266. if (show & show_mask) {
  1267. struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
  1268. if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
  1269. return 0;
  1270. if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
  1271. return 0;
  1272. if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
  1273. eprintf("problem incrementing symbol count, skipping event\n");
  1274. return -1;
  1275. }
  1276. }
  1277. total += period;
  1278. return 0;
  1279. }
  1280. static int
  1281. process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
  1282. {
  1283. struct thread *thread = threads__findnew(event->mmap.pid);
  1284. struct map *map = map__new(&event->mmap);
  1285. dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
  1286. (void *)(offset + head),
  1287. (void *)(long)(event->header.size),
  1288. event->mmap.pid,
  1289. (void *)(long)event->mmap.start,
  1290. (void *)(long)event->mmap.len,
  1291. (void *)(long)event->mmap.pgoff,
  1292. event->mmap.filename);
  1293. if (thread == NULL || map == NULL) {
  1294. dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
  1295. return 0;
  1296. }
  1297. thread__insert_map(thread, map);
  1298. total_mmap++;
  1299. return 0;
  1300. }
  1301. static int
  1302. process_comm_event(event_t *event, unsigned long offset, unsigned long head)
  1303. {
  1304. struct thread *thread = threads__findnew(event->comm.pid);
  1305. dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
  1306. (void *)(offset + head),
  1307. (void *)(long)(event->header.size),
  1308. event->comm.comm, event->comm.pid);
  1309. if (thread == NULL ||
  1310. thread__set_comm(thread, event->comm.comm)) {
  1311. dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
  1312. return -1;
  1313. }
  1314. total_comm++;
  1315. return 0;
  1316. }
  1317. static int
  1318. process_task_event(event_t *event, unsigned long offset, unsigned long head)
  1319. {
  1320. struct thread *thread = threads__findnew(event->fork.pid);
  1321. struct thread *parent = threads__findnew(event->fork.ppid);
  1322. dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
  1323. (void *)(offset + head),
  1324. (void *)(long)(event->header.size),
  1325. event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
  1326. event->fork.pid, event->fork.tid,
  1327. event->fork.ppid, event->fork.ptid);
  1328. /*
  1329. * A thread clone will have the same PID for both
  1330. * parent and child.
  1331. */
  1332. if (thread == parent)
  1333. return 0;
  1334. if (event->header.type == PERF_EVENT_EXIT)
  1335. return 0;
  1336. if (!thread || !parent || thread__fork(thread, parent)) {
  1337. dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
  1338. return -1;
  1339. }
  1340. total_fork++;
  1341. return 0;
  1342. }
  1343. static int
  1344. process_lost_event(event_t *event, unsigned long offset, unsigned long head)
  1345. {
  1346. dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
  1347. (void *)(offset + head),
  1348. (void *)(long)(event->header.size),
  1349. event->lost.id,
  1350. event->lost.lost);
  1351. total_lost += event->lost.lost;
  1352. return 0;
  1353. }
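/* With -D/--dump-raw-trace, hex+ASCII dump the raw bytes of every event. */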
  1354. static void trace_event(event_t *event)
  1355. {
  1356. unsigned char *raw_event = (void *)event;
  1357. char *color = PERF_COLOR_BLUE;
  1358. int i, j;
  1359. if (!dump_trace)
  1360. return;
  1361. dprintf(".");
  1362. cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
  1363. for (i = 0; i < event->header.size; i++) {
  1364. if ((i & 15) == 0) {
  1365. dprintf(".");
  1366. cdprintf(" %04x: ", i);
  1367. }
  1368. cdprintf(" %02x", raw_event[i]);
  1369. if (((i & 15) == 15) || i == event->header.size-1) {
  1370. cdprintf(" ");
  1371. for (j = 0; j < 15-(i & 15); j++)
  1372. cdprintf(" ");
  1373. for (j = 0; j < (i & 15); j++) {
  1374. if (isprint(raw_event[i-15+j]))
  1375. cdprintf("%c", raw_event[i-15+j]);
  1376. else
  1377. cdprintf(".");
  1378. }
  1379. cdprintf("\n");
  1380. }
  1381. }
  1382. dprintf(".\n");
  1383. }
  1384. static struct perf_header *header;
  1385. static struct perf_counter_attr *perf_header__find_attr(u64 id)
  1386. {
  1387. int i;
  1388. for (i = 0; i < header->attrs; i++) {
  1389. struct perf_header_attr *attr = header->attr[i];
  1390. int j;
  1391. for (j = 0; j < attr->ids; j++) {
  1392. if (attr->id[j] == id)
  1393. return &attr->attr;
  1394. }
  1395. }
  1396. return NULL;
  1397. }
  1398. static int
  1399. process_read_event(event_t *event, unsigned long offset, unsigned long head)
  1400. {
  1401. struct perf_counter_attr *attr = perf_header__find_attr(event->read.id);
  1402. dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
  1403. (void *)(offset + head),
  1404. (void *)(long)(event->header.size),
  1405. event->read.pid,
  1406. event->read.tid,
  1407. attr ? __event_name(attr->type, attr->config)
  1408. : "FAIL",
  1409. event->read.value);
  1410. return 0;
  1411. }
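/* Dispatch an event record to its handler based on header.type. */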
  1412. static int
  1413. process_event(event_t *event, unsigned long offset, unsigned long head)
  1414. {
  1415. trace_event(event);
  1416. switch (event->header.type) {
  1417. case PERF_EVENT_SAMPLE:
  1418. return process_sample_event(event, offset, head);
  1419. case PERF_EVENT_MMAP:
  1420. return process_mmap_event(event, offset, head);
  1421. case PERF_EVENT_COMM:
  1422. return process_comm_event(event, offset, head);
  1423. case PERF_EVENT_FORK:
  1424. case PERF_EVENT_EXIT:
  1425. return process_task_event(event, offset, head);
  1426. case PERF_EVENT_LOST:
  1427. return process_lost_event(event, offset, head);
  1428. case PERF_EVENT_READ:
  1429. return process_read_event(event, offset, head);
  1430. /*
  1431. * We don't process them right now but they are fine:
  1432. */
  1433. case PERF_EVENT_THROTTLE:
  1434. case PERF_EVENT_UNTHROTTLE:
  1435. return 0;
  1436. default:
  1437. return -1;
  1438. }
  1439. return 0;
  1440. }
  1441. static u64 perf_header__sample_type(void)
  1442. {
  1443. u64 sample_type = 0;
  1444. int i;
  1445. for (i = 0; i < header->attrs; i++) {
  1446. struct perf_header_attr *attr = header->attr[i];
  1447. if (!sample_type)
  1448. sample_type = attr->attr.sample_type;
  1449. else if (sample_type != attr->attr.sample_type)
  1450. die("non matching sample_type");
  1451. }
  1452. return sample_type;
  1453. }
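/*
 * The main report pass: mmap perf.data a window at a time, process every
 * event record, then collapse, re-sort and print the histogram.
 */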
  1454. static int __cmd_report(void)
  1455. {
  1456. int ret, rc = EXIT_FAILURE;
  1457. unsigned long offset = 0;
  1458. unsigned long head, shift;
  1459. struct stat stat;
  1460. event_t *event;
  1461. uint32_t size;
  1462. char *buf;
  1463. register_idle_thread();
  1464. input = open(input_name, O_RDONLY);
  1465. if (input < 0) {
  1466. fprintf(stderr, " failed to open file: %s", input_name);
  1467. if (!strcmp(input_name, "perf.data"))
  1468. fprintf(stderr, " (try 'perf record' first)");
  1469. fprintf(stderr, "\n");
  1470. exit(-1);
  1471. }
  1472. ret = fstat(input, &stat);
  1473. if (ret < 0) {
  1474. perror("failed to stat file");
  1475. exit(-1);
  1476. }
  1477. if (!stat.st_size) {
  1478. fprintf(stderr, "zero-sized file, nothing to do!\n");
  1479. exit(0);
  1480. }
  1481. header = perf_header__read(input);
  1482. head = header->data_offset;
  1483. sample_type = perf_header__sample_type();
  1484. if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
  1485. if (sort__has_parent) {
  1486. fprintf(stderr, "selected --sort parent, but no"
  1487. " callchain data. Did you call"
  1488. " perf record without -g?\n");
  1489. exit(-1);
  1490. }
  1491. if (callchain) {
  1492. fprintf(stderr, "selected -c but no callchain data."
  1493. " Did you call perf record without"
  1494. " -g?\n");
  1495. exit(-1);
  1496. }
  1497. } else if (callchain_param.mode != CHAIN_NONE && !callchain) {
  1498. callchain = 1;
  1499. if (register_callchain_param(&callchain_param) < 0) {
  1500. fprintf(stderr, "Can't register callchain"
  1501. " params\n");
  1502. exit(-1);
  1503. }
  1504. }
  1505. if (load_kernel() < 0) {
  1506. perror("failed to load kernel symbols");
  1507. return EXIT_FAILURE;
  1508. }
  1509. if (!full_paths) {
  1510. if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
  1511. perror("failed to get the current directory");
  1512. return EXIT_FAILURE;
  1513. }
  1514. cwdlen = strlen(cwd);
  1515. } else {
  1516. cwd = NULL;
  1517. cwdlen = 0;
  1518. }
  1519. shift = page_size * (head / page_size);
  1520. offset += shift;
  1521. head -= shift;
  1522. remap:
  1523. buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
  1524. MAP_SHARED, input, offset);
  1525. if (buf == MAP_FAILED) {
  1526. perror("failed to mmap file");
  1527. exit(-1);
  1528. }
  1529. more:
  1530. event = (event_t *)(buf + head);
  1531. size = event->header.size;
  1532. if (!size)
  1533. size = 8;
  1534. if (head + event->header.size >= page_size * mmap_window) {
  1535. int ret;
  1536. shift = page_size * (head / page_size);
  1537. ret = munmap(buf, page_size * mmap_window);
  1538. assert(ret == 0);
  1539. offset += shift;
  1540. head -= shift;
  1541. goto remap;
  1542. }
  1543. size = event->header.size;
  1544. dprintf("\n%p [%p]: event: %d\n",
  1545. (void *)(offset + head),
  1546. (void *)(long)event->header.size,
  1547. event->header.type);
  1548. if (!size || process_event(event, offset, head) < 0) {
  1549. dprintf("%p [%p]: skipping unknown header type: %d\n",
  1550. (void *)(offset + head),
  1551. (void *)(long)(event->header.size),
  1552. event->header.type);
  1553. total_unknown++;
  1554. /*
  1555. * assume we lost track of the stream, check alignment, and
  1556. * increment a single u64 in the hope of catching on again 'soon'.
  1557. */
  1558. if (unlikely(head & 7))
  1559. head &= ~7ULL;
  1560. size = 8;
  1561. }
  1562. head += size;
  1563. if (offset + head >= header->data_offset + header->data_size)
  1564. goto done;
  1565. if (offset + head < (unsigned long)stat.st_size)
  1566. goto more;
  1567. done:
  1568. rc = EXIT_SUCCESS;
  1569. close(input);
  1570. dprintf(" IP events: %10ld\n", total);
  1571. dprintf(" mmap events: %10ld\n", total_mmap);
  1572. dprintf(" comm events: %10ld\n", total_comm);
  1573. dprintf(" fork events: %10ld\n", total_fork);
  1574. dprintf(" lost events: %10ld\n", total_lost);
  1575. dprintf(" unknown events: %10ld\n", total_unknown);
  1576. if (dump_trace)
  1577. return 0;
  1578. if (verbose >= 3)
  1579. threads__fprintf(stdout);
  1580. if (verbose >= 2)
  1581. dsos__fprintf(stdout);
  1582. collapse__resort();
  1583. output__resort(total);
  1584. output__fprintf(stdout, total);
  1585. return rc;
  1586. }
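/*
 * Parse the -g/--call-graph argument: "<mode>[,<min_percent>]", where mode
 * is graph (absolute), fractal (relative), flat or none.
 */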
  1587. static int
  1588. parse_callchain_opt(const struct option *opt __used, const char *arg,
  1589. int unset __used)
  1590. {
  1591. char *tok;
  1592. char *endptr;
  1593. callchain = 1;
  1594. if (!arg)
  1595. return 0;
  1596. tok = strtok((char *)arg, ",");
  1597. if (!tok)
  1598. return -1;
  1599. /* get the output mode */
  1600. if (!strncmp(tok, "graph", strlen(arg)))
  1601. callchain_param.mode = CHAIN_GRAPH_ABS;
  1602. else if (!strncmp(tok, "flat", strlen(arg)))
  1603. callchain_param.mode = CHAIN_FLAT;
  1604. else if (!strncmp(tok, "fractal", strlen(arg)))
  1605. callchain_param.mode = CHAIN_GRAPH_REL;
  1606. else if (!strncmp(tok, "none", strlen(arg))) {
  1607. callchain_param.mode = CHAIN_NONE;
  1608. callchain = 0;
  1609. return 0;
  1610. }
  1611. else
  1612. return -1;
  1613. /* get the min percentage */
  1614. tok = strtok(NULL, ",");
  1615. if (!tok)
  1616. goto setup;
  1617. callchain_param.min_percent = strtod(tok, &endptr);
  1618. if (tok == endptr)
  1619. return -1;
  1620. setup:
  1621. if (register_callchain_param(&callchain_param) < 0) {
  1622. fprintf(stderr, "Can't register callchain params\n");
  1623. return -1;
  1624. }
  1625. return 0;
  1626. }
  1627. static const char * const report_usage[] = {
  1628. "perf report [<options>] <command>",
  1629. NULL
  1630. };
  1631. static const struct option options[] = {
  1632. OPT_STRING('i', "input", &input_name, "file",
  1633. "input file name"),
  1634. OPT_BOOLEAN('v', "verbose", &verbose,
  1635. "be more verbose (show symbol address, etc)"),
  1636. OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
  1637. "dump raw trace in ASCII"),
  1638. OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
  1639. OPT_BOOLEAN('m', "modules", &modules,
  1640. "load module symbols - WARNING: use only with -k and LIVE kernel"),
  1641. OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
  1642. "Show a column with the number of samples"),
  1643. OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
  1644. "sort by key(s): pid, comm, dso, symbol, parent"),
  1645. OPT_BOOLEAN('P', "full-paths", &full_paths,
  1646. "Don't shorten the pathnames taking into account the cwd"),
  1647. OPT_STRING('p', "parent", &parent_pattern, "regex",
  1648. "regex filter to identify parent, see: '--sort parent'"),
  1649. OPT_BOOLEAN('x', "exclude-other", &exclude_other,
  1650. "Only display entries with parent-match"),
  1651. OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
  1652. "Display callchains using output_type and min percent threshold. "
  1653. "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
  1654. OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
  1655. "only consider symbols in these dsos"),
  1656. OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
  1657. "only consider symbols in these comms"),
  1658. OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
  1659. "only consider these symbols"),
  1660. OPT_STRING('w', "column-widths", &col_width_list_str,
  1661. "width[,width...]",
  1662. "don't try to adjust column width, use these fixed values"),
  1663. OPT_STRING('t', "field-separator", &field_sep, "separator",
  1664. "separator for columns, no spaces will be added between "
  1665. "columns '.' is reserved."),
  1666. OPT_END()
  1667. };
  1668. static void setup_sorting(void)
  1669. {
  1670. char *tmp, *tok, *str = strdup(sort_order);
  1671. for (tok = strtok_r(str, ", ", &tmp);
  1672. tok; tok = strtok_r(NULL, ", ", &tmp)) {
  1673. if (sort_dimension__add(tok) < 0) {
  1674. error("Unknown --sort key: `%s'", tok);
  1675. usage_with_options(report_usage, options);
  1676. }
  1677. }
  1678. free(str);
  1679. }
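/*
 * Parse a comma-separated filter list; when it has a single entry, print it
 * in the report header and elide the now-redundant column.
 */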
  1680. static void setup_list(struct strlist **list, const char *list_str,
  1681. struct sort_entry *se, const char *list_name,
  1682. FILE *fp)
  1683. {
  1684. if (list_str) {
  1685. *list = strlist__new(true, list_str);
  1686. if (!*list) {
  1687. fprintf(stderr, "problems parsing %s list\n",
  1688. list_name);
  1689. exit(129);
  1690. }
  1691. if (strlist__nr_entries(*list) == 1) {
  1692. fprintf(fp, "# %s: %s\n", list_name,
  1693. strlist__entry(*list, 0)->s);
  1694. se->elide = true;
  1695. }
  1696. }
  1697. }
  1698. int cmd_report(int argc, const char **argv, const char *prefix __used)
  1699. {
  1700. symbol__init();
  1701. page_size = getpagesize();
  1702. argc = parse_options(argc, argv, options, report_usage, 0);
  1703. setup_sorting();
  1704. if (parent_pattern != default_parent_pattern) {
  1705. sort_dimension__add("parent");
  1706. sort_parent.elide = 1;
  1707. } else
  1708. exclude_other = 0;
  1709. /*
  1710. * Any (unrecognized) arguments left?
  1711. */
  1712. if (argc)
  1713. usage_with_options(report_usage, options);
  1714. setup_pager();
  1715. setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
  1716. setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
  1717. setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
  1718. if (field_sep && *field_sep == '.') {
  1719. fputs("'.' is the only invalid --field-separator argument\n",
  1720. stderr);
  1721. exit(129);
  1722. }
  1723. return __cmd_report();
  1724. }