builtin-report.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123
  1. /*
  2. * builtin-report.c
  3. *
  4. * Builtin report command: Analyze the perf.data input file,
  5. * look up and read DSOs and symbol information and display
  6. * a histogram of results, along various sorting keys.
  7. */
  8. #include "builtin.h"
  9. #include "util/util.h"
  10. #include "util/color.h"
  11. #include <linux/list.h>
  12. #include "util/cache.h"
  13. #include <linux/rbtree.h>
  14. #include "util/symbol.h"
  15. #include "util/string.h"
  16. #include "util/callchain.h"
  17. #include "util/strlist.h"
  18. #include "perf.h"
  19. #include "util/header.h"
  20. #include "util/parse-options.h"
  21. #include "util/parse-events.h"
/* Bitmask values for show_mask: which privilege levels get reported. */
#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

/* Input file produced by 'perf record'. */
static char const		*input_name = "perf.data";
/* Optional vmlinux path for kernel symbol resolution. */
static char			*vmlinux = NULL;

static char			default_sort_order[] = "comm,dso,symbol";
static char			*sort_order = default_sort_order;

/* Raw option strings, parsed later into the strlists below. */
static char			*dso_list_str, *comm_list_str, *sym_list_str,
				*col_width_list_str;
static struct strlist		*dso_list, *comm_list, *sym_list;
/* Field separator for machine-parseable output; disables column padding. */
static char			*field_sep;

static int			input;		/* perf.data file descriptor */
static int			show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int			dump_trace = 0;	/* dump raw events as they are read */
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)
#define cdprintf(x...)	do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)

static int			verbose;
#define eprintf(x...)	do { if (verbose) fprintf(stderr, x); } while (0)

static int			modules;	/* resolve symbols in kernel modules */
static int			full_paths;	/* keep absolute paths in the output */
static int			show_nr_samples;

static unsigned long		page_size;
static unsigned long		mmap_window = 32;	/* event mmap size, in pages */

/* Default boundary pattern for --sort parent attribution. */
static char			default_parent_pattern[] = "^sys_|^do_page_fault";
static char			*parent_pattern = default_parent_pattern;
static regex_t			parent_regex;

static int			exclude_other = 1;	/* hide entries with no matched parent */

static char			callchain_default_opt[] = "fractal,0.5";
static int			callchain;		/* print callchains per entry */

static
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};

/* sample_type of the recorded events — set elsewhere from the data file. */
static u64			sample_type;
/*
 * On-file event records: each starts with a perf_event_header, then a
 * type-specific payload.
 */
struct ip_event {
	struct perf_event_header header;
	u64 ip;
	u32 pid, tid;
	unsigned char __more_data[];	/* optional trailing sample data (flexible array) */
};

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;			/* mapped range is [start, start + len) */
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];			/* task command name, fixed 16 bytes */
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
	u32 tid, ptid;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;			/* count of events dropped */
};

struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 time_enabled;
	u64 time_running;
	u64 id;
};

/* A union wide enough to hold any record in the stream. */
typedef union event_union {
	struct perf_event_header	header;
	struct ip_event			ip;
	struct mmap_event		mmap;
	struct comm_event		comm;
	struct fork_event		fork;
	struct lost_event		lost;
	struct read_event		read;
} event_t;
  103. static int repsep_fprintf(FILE *fp, const char *fmt, ...)
  104. {
  105. int n;
  106. va_list ap;
  107. va_start(ap, fmt);
  108. if (!field_sep)
  109. n = vfprintf(fp, fmt, ap);
  110. else {
  111. char *bf = NULL;
  112. n = vasprintf(&bf, fmt, ap);
  113. if (n > 0) {
  114. char *sep = bf;
  115. while (1) {
  116. sep = strchr(sep, *field_sep);
  117. if (sep == NULL)
  118. break;
  119. *sep = '.';
  120. }
  121. }
  122. fputs(bf, fp);
  123. free(bf);
  124. }
  125. va_end(ap);
  126. return n;
  127. }
/* Global list of every dso seen, plus the synthetic special ones. */
static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
static struct dso *hypervisor_dso;

/* Append a dso to the global list; callers ensure no duplicates. */
static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}
  136. static struct dso *dsos__find(const char *name)
  137. {
  138. struct dso *pos;
  139. list_for_each_entry(pos, &dsos, node)
  140. if (strcmp(pos->name, name) == 0)
  141. return pos;
  142. return NULL;
  143. }
  144. static struct dso *dsos__findnew(const char *name)
  145. {
  146. struct dso *dso = dsos__find(name);
  147. int nr;
  148. if (dso)
  149. return dso;
  150. dso = dso__new(name, 0);
  151. if (!dso)
  152. goto out_delete_dso;
  153. nr = dso__load(dso, NULL, verbose);
  154. if (nr < 0) {
  155. eprintf("Failed to open: %s\n", name);
  156. goto out_delete_dso;
  157. }
  158. if (!nr)
  159. eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
  160. dsos__add(dso);
  161. return dso;
  162. out_delete_dso:
  163. dso__delete(dso);
  164. return NULL;
  165. }
  166. static void dsos__fprintf(FILE *fp)
  167. {
  168. struct dso *pos;
  169. list_for_each_entry(pos, &dsos, node)
  170. dso__fprintf(pos, fp);
  171. }
/*
 * ->find_symbol callback installed on the synthetic [vdso] dso:
 * a plain symbol-table lookup with the ip used as-is.
 */
static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
	return dso__find_symbol(dso, ip);
}
/*
 * Create and load the synthetic [kernel], [vdso] and [hypervisor] dsos.
 *
 * Returns -1 when an allocation fails, otherwise the result of
 * dso__load_kernel().  Note that on kernel-symbol load failure
 * kernel_dso is deleted and left NULL, but the (non-positive) load
 * result is still returned so callers can report it.
 */
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
	if (err <= 0) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	/* vdso ips are taken as-is: install the identity lookup. */
	vdso->find_symbol = vdso__find_symbol;

	dsos__add(vdso);

	hypervisor_dso = dso__new("[hypervisor]", 0);
	if (!hypervisor_dso)
		return -1;
	dsos__add(hypervisor_dso);

	return err;
}
  199. static char __cwd[PATH_MAX];
  200. static char *cwd = __cwd;
  201. static int cwdlen;
  202. static int strcommon(const char *pathname)
  203. {
  204. int n = 0;
  205. while (n < cwdlen && pathname[n] == cwd[n])
  206. ++n;
  207. return n;
  208. }
/*
 * One range of a thread's address space, tying ips that land in it to
 * a dso.  map_ip converts a raw ip into the dso's address space.
 */
struct map {
	struct list_head node;
	u64		 start;		/* first mapped address */
	u64		 end;		/* start + len from the mmap event */
	u64		 pgoff;
	u64		 (*map_ip)(struct map *, u64);
	struct dso	 *dso;
};
/* Convert an absolute ip into a dso-relative address for file-backed maps. */
static u64 map__map_ip(struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;
}

/* Identity mapping: vdso/anon ips are used unchanged. */
static u64 vdso__map_ip(struct map *map __used, u64 ip)
{
	return ip;
}
  225. static inline int is_anon_memory(const char *filename)
  226. {
  227. return strcmp(filename, "//anon") == 0;
  228. }
/*
 * Build a struct map from a PERF_EVENT_MMAP record.
 *
 * The filename is rewritten to be cwd-relative when the cwd is a full
 * prefix, and anonymous mappings are redirected to /tmp/perf-$pid.map
 * (presumably a per-process symbol map file — confirm against the
 * symbol loader).  Returns NULL on allocation or dso lookup failure.
 */
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];
		int anon;

		if (cwd) {
			int n = strcommon(filename);

			/* Only shorten when the entire cwd matches. */
			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					".%s", filename + n);
				filename = newfilename;
			}
		}

		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
			filename = newfilename;
		}

		self->start = event->start;
		self->end = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		/* vdso and anon ranges keep absolute ips; files are dso-relative. */
		if (self->dso == vdso || anon)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
  265. static struct map *map__clone(struct map *self)
  266. {
  267. struct map *map = malloc(sizeof(*self));
  268. if (!map)
  269. return NULL;
  270. memcpy(map, self, sizeof(*self));
  271. return map;
  272. }
  273. static int map__overlap(struct map *l, struct map *r)
  274. {
  275. if (l->start > r->start) {
  276. struct map *t = l;
  277. l = r;
  278. r = t;
  279. }
  280. if (l->end > r->start)
  281. return 1;
  282. return 0;
  283. }
/* One-line dump of a map: address range, pgoff and backing dso name. */
static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}
/* Per-task state: rbtree linkage, its maps, pid and command name. */
struct thread {
	struct rb_node	 rb_node;	/* node in the global 'threads' tree */
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};
  295. static struct thread *thread__new(pid_t pid)
  296. {
  297. struct thread *self = malloc(sizeof(*self));
  298. if (self != NULL) {
  299. self->pid = pid;
  300. self->comm = malloc(32);
  301. if (self->comm)
  302. snprintf(self->comm, 32, ":%d", self->pid);
  303. INIT_LIST_HEAD(&self->maps);
  304. }
  305. return self;
  306. }
/* Widest strings seen so far, used to align the report columns. */
static unsigned int dsos__col_width,
		    comms__col_width,
		    threads__col_width;

/*
 * Replace the thread's command name with a copy of comm and, when the
 * report uses dynamic column widths, widen the comm/thread columns to
 * fit it.  Returns 0 on success, -ENOMEM when strdup() fails.
 */
static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	if (!self->comm)
		return -ENOMEM;

	/* Width tracking is pointless with a field separator or fixed widths. */
	if (!col_width_list_str && !field_sep &&
	    (!comm_list || strlist__has_entry(comm_list, comm))) {
		unsigned int slen = strlen(comm);

		if (slen > comms__col_width) {
			comms__col_width = slen;
			threads__col_width = slen + 6;	/* room for ":%5d" pid suffix */
		}
	}

	return 0;
}
/* Print the thread header plus each of its maps; returns bytes written. */
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}
/* All threads seen so far, keyed by pid. */
static struct rb_root threads;
/* One-entry lookup cache, see threads__findnew(). */
static struct thread *last_match;

/*
 * Look a pid up in the thread rbtree, creating and inserting a new
 * thread when it is not there yet.  Returns NULL only when the
 * allocation in thread__new() fails.
 */
static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}
/*
 * Add a map to the thread, trimming (or deleting) any existing maps it
 * overlaps: the newly inserted map wins the contested address range.
 */
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			if (verbose >= 2) {
				printf("overlapping maps:\n");
				map__fprintf(map, stdout);
				map__fprintf(pos, stdout);
			}

			/* Shrink the old map away from the new one's edges. */
			if (map->start <= pos->start && map->end > pos->start)
				pos->start = map->end;

			if (map->end >= pos->end && map->start < pos->end)
				pos->end = map->start;

			if (verbose >= 2) {
				printf("after collision:\n");
				map__fprintf(pos, stdout);
			}

			/* Fully eclipsed: drop the old map entirely. */
			if (pos->start >= pos->end) {
				list_del_init(&pos->node);
				free(pos);
			}
		}
	}

	list_add_tail(&map->node, &self->maps);
}
  395. static int thread__fork(struct thread *self, struct thread *parent)
  396. {
  397. struct map *map;
  398. if (self->comm)
  399. free(self->comm);
  400. self->comm = strdup(parent->comm);
  401. if (!self->comm)
  402. return -ENOMEM;
  403. list_for_each_entry(map, &parent->maps, node) {
  404. struct map *new = map__clone(map);
  405. if (!new)
  406. return -ENOMEM;
  407. thread__insert_map(self, new);
  408. }
  409. return 0;
  410. }
  411. static struct map *thread__find_map(struct thread *self, u64 ip)
  412. {
  413. struct map *pos;
  414. if (self == NULL)
  415. return NULL;
  416. list_for_each_entry(pos, &self->maps, node)
  417. if (ip >= pos->start && ip <= pos->end)
  418. return pos;
  419. return NULL;
  420. }
  421. static size_t threads__fprintf(FILE *fp)
  422. {
  423. size_t ret = 0;
  424. struct rb_node *nd;
  425. for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
  426. struct thread *pos = rb_entry(nd, struct thread, rb_node);
  427. ret += thread__fprintf(pos, fp);
  428. }
  429. return ret;
  430. }
/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

/* One histogram row: where a sample landed and how often. */
struct hist_entry {
	struct rb_node	 rb_node;
	struct thread	 *thread;
	struct map	 *map;
	struct dso	 *dso;
	struct symbol	 *sym;
	struct symbol	 *parent;	/* caller matched by the --sort parent regex */
	u64		 ip;
	char		 level;		/* privilege-level marker, printed as "[%c]" */
	struct callchain_node callchain;
	struct rb_root	 sorted_chain;
	u64		 count;
};

/*
 * configurable sorting bits
 */

/* One --sort dimension: how to compare, merge and print entries. */
struct sort_entry {
	struct list_head list;

	char *header;					/* column title */

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);	/* optional merge key */
	size_t	(*print)(FILE *fp, struct hist_entry *, unsigned int width);
	unsigned int *width;				/* dynamic column width, may be NULL */
	bool	elide;					/* skip printing this column */
};
  460. static int64_t cmp_null(void *l, void *r)
  461. {
  462. if (!l && !r)
  463. return 0;
  464. else if (!l)
  465. return -1;
  466. else
  467. return 1;
  468. }
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

/* Print "comm:pid", right-aligned into the thread column. */
static size_t
sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	return repsep_fprintf(fp, "%*s:%5d", width - 6,
			      self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "Command: Pid",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
	.width	= &threads__col_width,
};
/* --sort comm */

/*
 * Primary sort is by pid; entries sharing a comm are merged later by
 * the collapse callback below.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

/* Merge key: the comm string itself, NULLs ordered via cmp_null(). */
static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r)
		return cmp_null(comm_l, comm_r);

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	return repsep_fprintf(fp, "%*s", width, self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= "Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
	.width		= &comms__col_width,
};
/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	/* Entries without a dso are ordered via cmp_null(). */
	if (!dso_l || !dso_r)
		return cmp_null(dso_l, dso_r);

	return strcmp(dso_l->name, dso_r->name);
}

/* Print the dso name, falling back to the raw ip when unresolved. */
static size_t
sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	if (self->dso)
		return repsep_fprintf(fp, "%-*s", width, self->dso->name);

	return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
	.width	= &dsos__col_width,
};
/* --sort symbol */

/* Compare by symbol start address, or by raw ip when unresolved. */
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

/*
 * Print "[level] name" (raw ip when unresolved); verbose mode prepends
 * the ip and symtab origin, and module symbols get a "[module]" tag.
 */
static size_t
sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
{
	size_t ret = 0;

	if (verbose)
		ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip,
				      dso__symtab_origin(self->dso));

	ret += repsep_fprintf(fp, "[%c] ", self->level);
	if (self->sym) {
		ret += repsep_fprintf(fp, "%s", self->sym->name);

		if (self->sym->module)
			ret += repsep_fprintf(fp, "\t[%s]",
					      self->sym->module->name);
	} else {
		ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};
/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_l->name, sym_r->name);
}

/* Entries whose callchain never matched parent_pattern print "[other]". */
static size_t
sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
{
	return repsep_fprintf(fp, "%-*s", width,
			      self->parent ? self->parent->name : "[other]");
}

static unsigned int parent_symbol__col_width;

static struct sort_entry sort_parent = {
	.header = "Parent symbol",
	.cmp	= sort__parent_cmp,
	.print	= sort__parent_print,
	.width	= &parent_symbol__col_width,
};
static int sort__need_collapse = 0;	/* set when an active dimension has ->collapse */
static int sort__has_parent = 0;	/* set when the "parent" dimension is active */

/* Name-to-sort_entry mapping used by the --sort option parser. */
struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;	/* already selected; each key counts once */
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
	{ .name = "parent",	.entry = &sort_parent,	},
};
/* Active sort dimensions, in command-line order. */
static LIST_HEAD(hist_entry__sort_list);

/*
 * Activate the sort dimension whose name tok matches (tok may be an
 * abbreviation).  Selecting "parent" also compiles parent_regex and
 * exits the program on an invalid pattern.
 *
 * Returns 0 on success, -ESRCH when tok names no known dimension.
 */
static int sort_dimension__add(char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		/* Prefix match: tok may abbreviate the dimension name. */
		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				fprintf(stderr, "Invalid regex: %s\n%s",
					parent_pattern, err);
				exit(-1);
			}
			sort__has_parent = 1;
		}

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}
  637. static int64_t
  638. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  639. {
  640. struct sort_entry *se;
  641. int64_t cmp = 0;
  642. list_for_each_entry(se, &hist_entry__sort_list, list) {
  643. cmp = se->cmp(left, right);
  644. if (cmp)
  645. break;
  646. }
  647. return cmp;
  648. }
  649. static int64_t
  650. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  651. {
  652. struct sort_entry *se;
  653. int64_t cmp = 0;
  654. list_for_each_entry(se, &hist_entry__sort_list, list) {
  655. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  656. f = se->collapse ?: se->cmp;
  657. cmp = f(left, right);
  658. if (cmp)
  659. break;
  660. }
  661. return cmp;
  662. }
/*
 * Print one spacer line carrying only the vertical '|' links of the
 * callchain levels still open in depth_mask.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
{
	int i;
	size_t ret = 0;

	ret += fprintf(fp, "%s", " ");
	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}
/*
 * Print one callchain entry of a graph branch.  The branch's first
 * entry (count == 0) gets a "--xx.xx%--" hit-share connector at the
 * deepest open level; the rest just continue the '|' links.
 */
static size_t
ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
		       int depth_mask, int count, u64 total_samples,
		       int hits)
{
	int i;
	size_t ret = 0;

	ret += fprintf(fp, "%s", " ");
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}

	if (chain->sym)
		ret += fprintf(fp, "%s\n", chain->sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
/*
 * Recursively print a callchain subtree in graph mode.  depth_mask
 * carries one bit per level that still needs a '|' link.  In
 * CHAIN_GRAPH_REL mode percentages are taken relative to the parent's
 * children_hit rather than the overall sample total.
 */
static size_t
callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
			 u64 total_samples, int depth, int depth_mask)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	size_t ret = 0;
	int i;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	node = rb_first(&self->rb_root);
	while (node) {
		child = rb_entry(node, struct callchain_node, rb_node);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth
		 */
		next = rb_next(node);
		if (!next)
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* Skip PERF_CONTEXT_* markers; they are not real ips. */
			if (chain->ip >= PERF_CONTEXT_MAX)
				continue;
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul_hits(child));
		}
		ret += callchain__fprintf_graph(fp, child, new_total,
						depth + 1,
						new_depth_mask | (1 << depth));
		node = next;
	}

	return ret;
}
/*
 * Flat mode: print the chain root-first by recursing to the parent
 * node before printing this node's own entries.
 */
static size_t
callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
			u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		/* Skip PERF_CONTEXT_* markers; they are not real ips. */
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->sym)
			ret += fprintf(fp, " %s\n", chain->sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}
/*
 * Print every collected callchain of a histogram entry, dispatching on
 * the configured callchain mode; flat mode is headed by the chain's
 * share of total_samples.
 */
static size_t
hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
			      u64 total_samples)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain,
							total_samples, 1, 1);
			/* fall through to the terminating break */
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		rb_node = rb_next(rb_node);
	}

	return ret;
}
/*
 * Print one histogram row: percentage (or raw count when there is no
 * total), the optional sample count, one column per active sort
 * dimension, then any callchains.  With exclude_other set, entries
 * with no matched parent are suppressed entirely.
 */
static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (exclude_other && !self->parent)
		return 0;

	if (total_samples)
		ret = percent_color_fprintf(fp,
					    field_sep ? "%.2f" : " %6.2f%%",
					    (self->count * 100.0) / total_samples);
	else
		ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);

	if (show_nr_samples) {
		/*
		 * NOTE(review): these widths are not added to ret, so the
		 * returned byte count excludes this column — confirm
		 * whether any caller depends on ret.
		 */
		if (field_sep)
			fprintf(fp, "%c%lld", *field_sep, self->count);
		else
			fprintf(fp, "%11lld", self->count);
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		fprintf(fp, "%s", field_sep ?: " ");
		ret += se->print(fp, self, se->width ? *se->width : 0);
	}

	ret += fprintf(fp, "\n");

	if (callchain)
		hist_entry_callchain__fprintf(fp, self, total_samples);

	return ret;
}
/*
 * Output column-width bookkeeping:
 */
  832. static void dso__calc_col_width(struct dso *self)
  833. {
  834. if (!col_width_list_str && !field_sep &&
  835. (!dso_list || strlist__has_entry(dso_list, self->name))) {
  836. unsigned int slen = strlen(self->name);
  837. if (slen > dsos__col_width)
  838. dsos__col_width = slen;
  839. }
  840. self->slen_calculated = 1;
  841. }
/*
 * Map an instruction pointer to a symbol, filling *mapp/*dsop/*ipp on
 * the way out.  *ipp is rewritten from an absolute address to a
 * map-relative one when a map is found.  Returns NULL when the ip
 * cannot be attributed to any dso.
 *
 * Callers may pre-seed the lookup: a non-NULL *dsop skips straight to
 * the symbol search, a non-NULL *mapp skips the map lookup.  Note the
 * gotos jump into the middle of the if-block below — keep the
 * statement order intact.
 */
static struct symbol *
resolve_symbol(struct thread *thread, struct map **mapp,
	       struct dso **dsop, u64 *ipp)
{
	struct dso *dso = dsop ? *dsop : NULL;
	struct map *map = mapp ? *mapp : NULL;
	u64 ip = *ipp;

	if (!thread)
		return NULL;

	if (dso)
		goto got_dso;

	if (map)
		goto got_map;

	map = thread__find_map(thread, ip);
	if (map != NULL) {
		/*
		 * We have to do this here as we may have a dso
		 * with no symbol hit that has a name longer than
		 * the ones with symbols sampled.
		 */
		if (!sort_dso.elide && !map->dso->slen_calculated)
			dso__calc_col_width(map->dso);

		if (mapp)
			*mapp = map;
got_map:
		/* Translate the absolute ip into a map-relative one. */
		ip = map->map_ip(map, ip);

		dso = map->dso;
	} else {
		/*
		 * If this is outside of all known maps,
		 * and is a negative address, try to look it
		 * up in the kernel dso, as it might be a
		 * vsyscall (which executes in user-mode):
		 */
		if ((long long)ip < 0)
			dso = kernel_dso;
	}
	dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
	dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
	*ipp = ip;

	if (dsop)
		*dsop = dso;

	if (!dso)
		return NULL;
got_dso:
	return dso->find_symbol(dso, ip);
}
  889. static int call__match(struct symbol *sym)
  890. {
  891. if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
  892. return 1;
  893. return 0;
  894. }
  895. static struct symbol **
  896. resolve_callchain(struct thread *thread, struct map *map __used,
  897. struct ip_callchain *chain, struct hist_entry *entry)
  898. {
  899. u64 context = PERF_CONTEXT_MAX;
  900. struct symbol **syms = NULL;
  901. unsigned int i;
  902. if (callchain) {
  903. syms = calloc(chain->nr, sizeof(*syms));
  904. if (!syms) {
  905. fprintf(stderr, "Can't allocate memory for symbols\n");
  906. exit(-1);
  907. }
  908. }
  909. for (i = 0; i < chain->nr; i++) {
  910. u64 ip = chain->ips[i];
  911. struct dso *dso = NULL;
  912. struct symbol *sym;
  913. if (ip >= PERF_CONTEXT_MAX) {
  914. context = ip;
  915. continue;
  916. }
  917. switch (context) {
  918. case PERF_CONTEXT_HV:
  919. dso = hypervisor_dso;
  920. break;
  921. case PERF_CONTEXT_KERNEL:
  922. dso = kernel_dso;
  923. break;
  924. default:
  925. break;
  926. }
  927. sym = resolve_symbol(thread, NULL, &dso, &ip);
  928. if (sym) {
  929. if (sort__has_parent && call__match(sym) &&
  930. !entry->parent)
  931. entry->parent = sym;
  932. if (!callchain)
  933. break;
  934. syms[i] = sym;
  935. }
  936. }
  937. return syms;
  938. }
  939. /*
  940. * collect histogram counts
  941. */
  942. static int
  943. hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
  944. struct symbol *sym, u64 ip, struct ip_callchain *chain,
  945. char level, u64 count)
  946. {
  947. struct rb_node **p = &hist.rb_node;
  948. struct rb_node *parent = NULL;
  949. struct hist_entry *he;
  950. struct symbol **syms = NULL;
  951. struct hist_entry entry = {
  952. .thread = thread,
  953. .map = map,
  954. .dso = dso,
  955. .sym = sym,
  956. .ip = ip,
  957. .level = level,
  958. .count = count,
  959. .parent = NULL,
  960. .sorted_chain = RB_ROOT
  961. };
  962. int cmp;
  963. if ((sort__has_parent || callchain) && chain)
  964. syms = resolve_callchain(thread, map, chain, &entry);
  965. while (*p != NULL) {
  966. parent = *p;
  967. he = rb_entry(parent, struct hist_entry, rb_node);
  968. cmp = hist_entry__cmp(&entry, he);
  969. if (!cmp) {
  970. he->count += count;
  971. if (callchain) {
  972. append_chain(&he->callchain, chain, syms);
  973. free(syms);
  974. }
  975. return 0;
  976. }
  977. if (cmp < 0)
  978. p = &(*p)->rb_left;
  979. else
  980. p = &(*p)->rb_right;
  981. }
  982. he = malloc(sizeof(*he));
  983. if (!he)
  984. return -ENOMEM;
  985. *he = entry;
  986. if (callchain) {
  987. callchain_init(&he->callchain);
  988. append_chain(&he->callchain, chain, syms);
  989. free(syms);
  990. }
  991. rb_link_node(&he->rb_node, parent, p);
  992. rb_insert_color(&he->rb_node, &hist);
  993. return 0;
  994. }
/* Release a histogram entry allocated by hist_entry__add(). */
static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
/*
 * collapse the histogram
 *
 * Entries that compare equal under hist_entry__collapse() are merged
 * into this second tree before the output pass.
 */
static struct rb_root collapse_hists;
  1003. static void collapse__insert_entry(struct hist_entry *he)
  1004. {
  1005. struct rb_node **p = &collapse_hists.rb_node;
  1006. struct rb_node *parent = NULL;
  1007. struct hist_entry *iter;
  1008. int64_t cmp;
  1009. while (*p != NULL) {
  1010. parent = *p;
  1011. iter = rb_entry(parent, struct hist_entry, rb_node);
  1012. cmp = hist_entry__collapse(iter, he);
  1013. if (!cmp) {
  1014. iter->count += he->count;
  1015. hist_entry__free(he);
  1016. return;
  1017. }
  1018. if (cmp < 0)
  1019. p = &(*p)->rb_left;
  1020. else
  1021. p = &(*p)->rb_right;
  1022. }
  1023. rb_link_node(&he->rb_node, parent, p);
  1024. rb_insert_color(&he->rb_node, &collapse_hists);
  1025. }
  1026. static void collapse__resort(void)
  1027. {
  1028. struct rb_node *next;
  1029. struct hist_entry *n;
  1030. if (!sort__need_collapse)
  1031. return;
  1032. next = rb_first(&hist);
  1033. while (next) {
  1034. n = rb_entry(next, struct hist_entry, rb_node);
  1035. next = rb_next(&n->rb_node);
  1036. rb_erase(&n->rb_node, &hist);
  1037. collapse__insert_entry(n);
  1038. }
  1039. }
/*
 * reverse the map, sort on count.
 *
 * Final tree, ordered by descending sample count, walked in order by
 * output__fprintf().
 */
static struct rb_root output_hists;
  1044. static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
  1045. {
  1046. struct rb_node **p = &output_hists.rb_node;
  1047. struct rb_node *parent = NULL;
  1048. struct hist_entry *iter;
  1049. if (callchain)
  1050. callchain_param.sort(&he->sorted_chain, &he->callchain,
  1051. min_callchain_hits, &callchain_param);
  1052. while (*p != NULL) {
  1053. parent = *p;
  1054. iter = rb_entry(parent, struct hist_entry, rb_node);
  1055. if (he->count > iter->count)
  1056. p = &(*p)->rb_left;
  1057. else
  1058. p = &(*p)->rb_right;
  1059. }
  1060. rb_link_node(&he->rb_node, parent, p);
  1061. rb_insert_color(&he->rb_node, &output_hists);
  1062. }
  1063. static void output__resort(u64 total_samples)
  1064. {
  1065. struct rb_node *next;
  1066. struct hist_entry *n;
  1067. struct rb_root *tree = &hist;
  1068. u64 min_callchain_hits;
  1069. min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
  1070. if (sort__need_collapse)
  1071. tree = &collapse_hists;
  1072. next = rb_first(tree);
  1073. while (next) {
  1074. n = rb_entry(next, struct hist_entry, rb_node);
  1075. next = rb_next(&n->rb_node);
  1076. rb_erase(&n->rb_node, tree);
  1077. output__insert_entry(n, min_callchain_hits);
  1078. }
  1079. }
/*
 * Print the final report: sample count banner, column headers (with
 * width negotiation), dotted underline rows, then one line per sorted
 * histogram entry.  Returns the bytes produced by the entry rows only.
 */
static size_t output__fprintf(FILE *fp, u64 total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned int width;
	/* Cursor into the user's comma-separated -w width list. */
	char *col_width = col_width_list_str;

	fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	if (show_nr_samples) {
		if (field_sep)
			fprintf(fp, "%cSamples", *field_sep);
		else
			fputs(" Samples ", fp);
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (field_sep) {
			/* Separated output: no padding, just headers. */
			fprintf(fp, "%c%s", *field_sep, se->header);
			continue;
		}
		width = strlen(se->header);
		if (se->width) {
			if (col_width_list_str) {
				/* Consume the next user-given width, if any. */
				if (col_width) {
					*se->width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			/* Never narrower than the header text itself. */
			width = *se->width = max(*se->width, width);
		}
		fprintf(fp, " %*s", width, se->header);
	}
	fprintf(fp, "\n");

	/* With an explicit separator there is no alignment to underline. */
	if (field_sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (show_nr_samples)
		fprintf(fp, " ..........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		if (se->width)
			width = *se->width;
		else
			width = strlen(se->header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

print_entries:
	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	/* Hint shown only for the default report configuration. */
	if (sort_order == default_sort_order &&
	    parent_pattern == default_parent_pattern) {
		fprintf(fp, "#\n");
		fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
		fprintf(fp, "#\n");
	}
	fprintf(fp, "\n");

	return ret;
}
  1152. static void register_idle_thread(void)
  1153. {
  1154. struct thread *thread = threads__findnew(0);
  1155. if (thread == NULL ||
  1156. thread__set_comm(thread, "[idle]")) {
  1157. fprintf(stderr, "problem inserting idle task.\n");
  1158. exit(-1);
  1159. }
  1160. }
/* Running event counters; dumped by __cmd_report() under -D/-v. */
static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0,
		     total_lost = 0;
  1167. static int validate_chain(struct ip_callchain *chain, event_t *event)
  1168. {
  1169. unsigned int chain_size;
  1170. chain_size = event->header.size;
  1171. chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
  1172. if (chain->nr*sizeof(u64) > chain_size)
  1173. return -1;
  1174. return 0;
  1175. }
  1176. static int
  1177. process_sample_event(event_t *event, unsigned long offset, unsigned long head)
  1178. {
  1179. char level;
  1180. int show = 0;
  1181. struct dso *dso = NULL;
  1182. struct thread *thread = threads__findnew(event->ip.pid);
  1183. u64 ip = event->ip.ip;
  1184. u64 period = 1;
  1185. struct map *map = NULL;
  1186. void *more_data = event->ip.__more_data;
  1187. struct ip_callchain *chain = NULL;
  1188. int cpumode;
  1189. if (sample_type & PERF_SAMPLE_PERIOD) {
  1190. period = *(u64 *)more_data;
  1191. more_data += sizeof(u64);
  1192. }
  1193. dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
  1194. (void *)(offset + head),
  1195. (void *)(long)(event->header.size),
  1196. event->header.misc,
  1197. event->ip.pid,
  1198. (void *)(long)ip,
  1199. (long long)period);
  1200. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  1201. unsigned int i;
  1202. chain = (void *)more_data;
  1203. dprintf("... chain: nr:%Lu\n", chain->nr);
  1204. if (validate_chain(chain, event) < 0) {
  1205. eprintf("call-chain problem with event, skipping it.\n");
  1206. return 0;
  1207. }
  1208. if (dump_trace) {
  1209. for (i = 0; i < chain->nr; i++)
  1210. dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
  1211. }
  1212. }
  1213. dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
  1214. if (thread == NULL) {
  1215. eprintf("problem processing %d event, skipping it.\n",
  1216. event->header.type);
  1217. return -1;
  1218. }
  1219. if (comm_list && !strlist__has_entry(comm_list, thread->comm))
  1220. return 0;
  1221. cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
  1222. if (cpumode == PERF_EVENT_MISC_KERNEL) {
  1223. show = SHOW_KERNEL;
  1224. level = 'k';
  1225. dso = kernel_dso;
  1226. dprintf(" ...... dso: %s\n", dso->name);
  1227. } else if (cpumode == PERF_EVENT_MISC_USER) {
  1228. show = SHOW_USER;
  1229. level = '.';
  1230. } else {
  1231. show = SHOW_HV;
  1232. level = 'H';
  1233. dso = hypervisor_dso;
  1234. dprintf(" ...... dso: [hypervisor]\n");
  1235. }
  1236. if (show & show_mask) {
  1237. struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
  1238. if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
  1239. return 0;
  1240. if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
  1241. return 0;
  1242. if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
  1243. eprintf("problem incrementing symbol count, skipping event\n");
  1244. return -1;
  1245. }
  1246. }
  1247. total += period;
  1248. return 0;
  1249. }
/*
 * PERF_EVENT_MMAP: record a new mapping on the owning thread so later
 * samples can be resolved against it.  Always returns 0 (a failed
 * lookup is only traced, not fatal).
 */
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		/*
		 * NOTE(review): when only the thread lookup fails, map is
		 * not released here — confirm whether map__new() memory
		 * is leaked on this path.
		 */
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}
/*
 * PERF_EVENT_COMM: a task (re)set its command name.  Stores the new
 * comm on the thread; returns -1 if the thread cannot be created or
 * renamed, 0 otherwise.
 */
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}
/*
 * PERF_EVENT_FORK / PERF_EVENT_EXIT: maintain the thread tree.  A fork
 * links the child to its parent via thread__fork(); exit events are
 * acknowledged but otherwise ignored.
 */
static int
process_task_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
		event->fork.pid, event->fork.tid,
		event->fork.ppid, event->fork.ptid);

	/*
	 * A thread clone will have the same PID for both
	 * parent and child.
	 */
	if (thread == parent)
		return 0;

	/* Nothing to tear down on exit; the thread stays registered. */
	if (event->header.type == PERF_EVENT_EXIT)
		return 0;

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}
/*
 * PERF_EVENT_LOST: the kernel dropped records; accumulate the count so
 * the -D summary can report how much data went missing.
 */
static int
process_lost_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->lost.id,
		event->lost.lost);

	total_lost += event->lost.lost;

	return 0;
}
  1324. static void trace_event(event_t *event)
  1325. {
  1326. unsigned char *raw_event = (void *)event;
  1327. char *color = PERF_COLOR_BLUE;
  1328. int i, j;
  1329. if (!dump_trace)
  1330. return;
  1331. dprintf(".");
  1332. cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
  1333. for (i = 0; i < event->header.size; i++) {
  1334. if ((i & 15) == 0) {
  1335. dprintf(".");
  1336. cdprintf(" %04x: ", i);
  1337. }
  1338. cdprintf(" %02x", raw_event[i]);
  1339. if (((i & 15) == 15) || i == event->header.size-1) {
  1340. cdprintf(" ");
  1341. for (j = 0; j < 15-(i & 15); j++)
  1342. cdprintf(" ");
  1343. for (j = 0; j < (i & 15); j++) {
  1344. if (isprint(raw_event[i-15+j]))
  1345. cdprintf("%c", raw_event[i-15+j]);
  1346. else
  1347. cdprintf(".");
  1348. }
  1349. cdprintf("\n");
  1350. }
  1351. }
  1352. dprintf(".\n");
  1353. }
  1354. static struct perf_header *header;
  1355. static struct perf_counter_attr *perf_header__find_attr(u64 id)
  1356. {
  1357. int i;
  1358. for (i = 0; i < header->attrs; i++) {
  1359. struct perf_header_attr *attr = header->attr[i];
  1360. int j;
  1361. for (j = 0; j < attr->ids; j++) {
  1362. if (attr->id[j] == id)
  1363. return &attr->attr;
  1364. }
  1365. }
  1366. return NULL;
  1367. }
/*
 * PERF_EVENT_READ: counter value snapshot.  Only traced under -D, not
 * aggregated; the attr lookup recovers a printable counter name.
 */
static int
process_read_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct perf_counter_attr *attr = perf_header__find_attr(event->read.id);

	dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->read.pid,
		event->read.tid,
		attr ? __event_name(attr->type, attr->config)
		     : "FAIL",
		event->read.value);

	return 0;
}
  1382. static int
  1383. process_event(event_t *event, unsigned long offset, unsigned long head)
  1384. {
  1385. trace_event(event);
  1386. switch (event->header.type) {
  1387. case PERF_EVENT_SAMPLE:
  1388. return process_sample_event(event, offset, head);
  1389. case PERF_EVENT_MMAP:
  1390. return process_mmap_event(event, offset, head);
  1391. case PERF_EVENT_COMM:
  1392. return process_comm_event(event, offset, head);
  1393. case PERF_EVENT_FORK:
  1394. case PERF_EVENT_EXIT:
  1395. return process_task_event(event, offset, head);
  1396. case PERF_EVENT_LOST:
  1397. return process_lost_event(event, offset, head);
  1398. case PERF_EVENT_READ:
  1399. return process_read_event(event, offset, head);
  1400. /*
  1401. * We dont process them right now but they are fine:
  1402. */
  1403. case PERF_EVENT_THROTTLE:
  1404. case PERF_EVENT_UNTHROTTLE:
  1405. return 0;
  1406. default:
  1407. return -1;
  1408. }
  1409. return 0;
  1410. }
  1411. static u64 perf_header__sample_type(void)
  1412. {
  1413. u64 sample_type = 0;
  1414. int i;
  1415. for (i = 0; i < header->attrs; i++) {
  1416. struct perf_header_attr *attr = header->attr[i];
  1417. if (!sample_type)
  1418. sample_type = attr->attr.sample_type;
  1419. else if (sample_type != attr->attr.sample_type)
  1420. die("non matching sample_type");
  1421. }
  1422. return sample_type;
  1423. }
/*
 * Main report pipeline: open and mmap perf.data, walk every record
 * through process_event(), then collapse/resort the histogram and
 * print it.  Returns EXIT_SUCCESS/EXIT_FAILURE (or 0 early when -D
 * only dumped the trace).
 */
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head, shift;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, " (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	header = perf_header__read(input);
	head = header->data_offset;

	sample_type = perf_header__sample_type();

	/* Callchain-dependent options require -g data in the file. */
	if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (sort__has_parent) {
			fprintf(stderr, "selected --sort parent, but no"
					" callchain data. Did you call"
					" perf record without -g?\n");
			exit(-1);
		}
		if (callchain) {
			fprintf(stderr, "selected -c but no callchain data."
					" Did you call perf record without"
					" -g?\n");
			exit(-1);
		}
	} else if (callchain_param.mode != CHAIN_NONE && !callchain) {
		/* Chain data present: enable chain output by default. */
		callchain = 1;
		if (register_callchain_param(&callchain_param) < 0) {
			fprintf(stderr, "Can't register callchain"
					" params\n");
			exit(-1);
		}
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		/*
		 * NOTE(review): getcwd() filled __cwd; this assumes cwd
		 * aliases __cwd — confirm against their definitions.
		 */
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}

	/* Align the mmap window on the page containing 'head'. */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	/* A zero-size header would make no progress; force a minimum. */
	size = event->header.size;
	if (!size)
		size = 8;

	/* Record crosses the window end: slide the window forward. */
	if (head + event->header.size >= page_size * mmap_window) {
		int ret;

		shift = page_size * (head / page_size);

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	/* Re-read; the !size fallback is re-applied below if needed. */
	size = event->header.size;

	dprintf("\n%p [%p]: event: %d\n",
		(void *)(offset + head),
		(void *)(long)event->header.size,
		event->header.type);

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= header->data_offset + header->data_size)
		goto done;

	if (offset + head < (unsigned long)stat.st_size)
		goto more;

done:
	rc = EXIT_SUCCESS;
	close(input);

	dprintf(" IP events: %10ld\n", total);
	dprintf(" mmap events: %10ld\n", total_mmap);
	dprintf(" comm events: %10ld\n", total_comm);
	dprintf(" fork events: %10ld\n", total_fork);
	dprintf(" lost events: %10ld\n", total_lost);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort(total);
	output__fprintf(stdout, total);

	return rc;
}
/*
 * Parse the -g/--call-graph argument: "output_type[,min_percent]".
 * Recognized types (matched as prefixes, see below): graph, flat,
 * fractal, none.  Returns 0 on success, -1 on a malformed argument.
 */
static int
parse_callchain_opt(const struct option *opt __used, const char *arg,
		    int unset __used)
{
	char *tok;
	char *endptr;

	callchain = 1;

	if (!arg)
		return 0;

	/* strtok() writes into the option string in place. */
	tok = strtok((char *)arg, ",");
	if (!tok)
		return -1;

	/* get the output mode */
	/*
	 * After strtok() above, arg is truncated at the first ',' so
	 * strlen(arg) == strlen(tok): these are prefix matches — e.g.
	 * "-g g" selects "graph".
	 */
	if (!strncmp(tok, "graph", strlen(arg)))
		callchain_param.mode = CHAIN_GRAPH_ABS;

	else if (!strncmp(tok, "flat", strlen(arg)))
		callchain_param.mode = CHAIN_FLAT;

	else if (!strncmp(tok, "fractal", strlen(arg)))
		callchain_param.mode = CHAIN_GRAPH_REL;

	else if (!strncmp(tok, "none", strlen(arg))) {
		callchain_param.mode = CHAIN_NONE;
		callchain = 0;

		return 0;
	}

	else
		return -1;

	/* get the min percentage */
	tok = strtok(NULL, ",");
	if (!tok)
		goto setup;

	callchain_param.min_percent = strtod(tok, &endptr);
	if (tok == endptr)
		return -1;

setup:
	if (register_callchain_param(&callchain_param) < 0) {
		fprintf(stderr, "Can't register callchain params\n");
		return -1;
	}
	return 0;
}
/* Shown by usage_with_options() when argument parsing fails. */
static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

/* Option table for parse_options(); values land in file-scope globals. */
static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_BOOLEAN('m', "modules", &modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
	OPT_BOOLEAN('x', "exclude-other", &exclude_other,
		    "Only display entries with parent-match"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
			     "Display callchains using output_type and min percent threshold. "
			     "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
	OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_STRING('w', "column-widths", &col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_STRING('t', "field-separator", &field_sep, "separator",
		   "separator for columns, no spaces will be added between "
		   "columns '.' is reserved."),
	OPT_END()
};
  1638. static void setup_sorting(void)
  1639. {
  1640. char *tmp, *tok, *str = strdup(sort_order);
  1641. for (tok = strtok_r(str, ", ", &tmp);
  1642. tok; tok = strtok_r(NULL, ", ", &tmp)) {
  1643. if (sort_dimension__add(tok) < 0) {
  1644. error("Unknown --sort key: `%s'", tok);
  1645. usage_with_options(report_usage, options);
  1646. }
  1647. }
  1648. free(str);
  1649. }
  1650. static void setup_list(struct strlist **list, const char *list_str,
  1651. struct sort_entry *se, const char *list_name,
  1652. FILE *fp)
  1653. {
  1654. if (list_str) {
  1655. *list = strlist__new(true, list_str);
  1656. if (!*list) {
  1657. fprintf(stderr, "problems parsing %s list\n",
  1658. list_name);
  1659. exit(129);
  1660. }
  1661. if (strlist__nr_entries(*list) == 1) {
  1662. fprintf(fp, "# %s: %s\n", list_name,
  1663. strlist__entry(*list, 0)->s);
  1664. se->elide = true;
  1665. }
  1666. }
  1667. }
/*
 * Entry point for "perf report": parse options, configure sorting and
 * the dso/comm/symbol filters, then run the report pipeline.
 */
int cmd_report(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	if (parent_pattern != default_parent_pattern) {
		/* A custom --parent implies sorting by parent (elided). */
		sort_dimension__add("parent");
		sort_parent.elide = 1;
	} else
		exclude_other = 0;

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_pager();

	setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
	setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
	setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);

	if (field_sep && *field_sep == '.') {
		/* '.' would collide with the dotted underline rows. */
		fputs("'.' is the only non valid --field-separator argument\n",
		      stderr);
		exit(129);
	}

	return __cmd_report();
}