builtin-report.c

  1. /*
  2. * builtin-report.c
  3. *
  4. * Builtin report command: Analyze the perf.data input file,
  5. * look up and read DSOs and symbol information and display
  6. * a histogram of results, across various sorting keys.
  7. */
  8. #include "builtin.h"
  9. #include "util/util.h"
  10. #include "util/color.h"
  11. #include <linux/list.h>
  12. #include "util/cache.h"
  13. #include <linux/rbtree.h>
  14. #include "util/symbol.h"
  15. #include "util/string.h"
  16. #include "util/callchain.h"
  17. #include "util/strlist.h"
  18. #include "util/values.h"
  19. #include "perf.h"
  20. #include "util/header.h"
  21. #include "util/parse-options.h"
  22. #include "util/parse-events.h"
  23. #define SHOW_KERNEL 1
  24. #define SHOW_USER 2
  25. #define SHOW_HV 4
  26. static char const *input_name = "perf.data";
  27. static char default_sort_order[] = "comm,dso,symbol";
  28. static char *sort_order = default_sort_order;
  29. static char *dso_list_str, *comm_list_str, *sym_list_str,
  30. *col_width_list_str;
  31. static struct strlist *dso_list, *comm_list, *sym_list;
  32. static char *field_sep;
  33. static int input;
  34. static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
  35. static int dump_trace = 0;
  36. #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
  37. #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
  38. static int full_paths;
  39. static int show_nr_samples;
  40. static int show_threads;
  41. static struct perf_read_values show_threads_values;
  42. static char default_pretty_printing_style[] = "normal";
  43. static char *pretty_printing_style = default_pretty_printing_style;
  44. static unsigned long page_size;
  45. static unsigned long mmap_window = 32;
  46. static char default_parent_pattern[] = "^sys_|^do_page_fault";
  47. static char *parent_pattern = default_parent_pattern;
  48. static regex_t parent_regex;
  49. static int exclude_other = 1;
  50. static char callchain_default_opt[] = "fractal,0.5";
  51. static int callchain;
  52. static
  53. struct callchain_param callchain_param = {
  54. .mode = CHAIN_GRAPH_REL,
  55. .min_percent = 0.5
  56. };
  57. static u64 sample_type;
  58. static int repsep_fprintf(FILE *fp, const char *fmt, ...)
  59. {
  60. int n;
  61. va_list ap;
  62. va_start(ap, fmt);
  63. if (!field_sep)
  64. n = vfprintf(fp, fmt, ap);
  65. else {
  66. char *bf = NULL;
  67. n = vasprintf(&bf, fmt, ap);
  68. if (n >= 0) {
  69. char *sep = bf;
  70. while (1) {
  71. sep = strchr(sep, *field_sep);
  72. if (sep == NULL)
  73. break;
  74. *sep = '.';
  75. }
  76. fputs(bf, fp);
  77. free(bf);
  78. }
  79. }
  80. va_end(ap);
  81. return n;
  82. }
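/*
 * Note on repsep_fprintf() above: when --field-separator is set, any
 * occurrence of the separator character that the formatted output would
 * itself contain is rewritten to '.', so downstream tools can still split
 * records on the separator.  Illustrative (hypothetical) case: with
 * field_sep = "," a comm of "foo,bar" is emitted as "foo.bar".  This is
 * also why cmd_report() rejects '.' as a --field-separator argument.
 */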
  83. static char __cwd[PATH_MAX];
  84. static char *cwd = __cwd;
  85. static int cwdlen;
  86. static int strcommon(const char *pathname)
  87. {
  88. int n = 0;
  89. while (n < cwdlen && pathname[n] == cwd[n])
  90. ++n;
  91. return n;
  92. }
  93. struct map {
  94. struct list_head node;
  95. u64 start;
  96. u64 end;
  97. u64 pgoff;
  98. u64 (*map_ip)(struct map *, u64);
  99. struct dso *dso;
  100. };
  101. static u64 map__map_ip(struct map *map, u64 ip)
  102. {
  103. return ip - map->start + map->pgoff;
  104. }
  105. static u64 vdso__map_ip(struct map *map __used, u64 ip)
  106. {
  107. return ip;
  108. }
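/*
 * Worked example for the two translation helpers above (addresses are
 * purely illustrative): for a map with start = 0x400000 and pgoff =
 * 0x1000, a sample at ip = 0x400123 maps to 0x400123 - 0x400000 +
 * 0x1000 = 0x1123, the dso-relative address used for symbol lookup.
 * The vdso/anon variant is the identity mapping: those addresses are
 * looked up as-is.
 */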
  109. static inline int is_anon_memory(const char *filename)
  110. {
  111. return strcmp(filename, "//anon") == 0;
  112. }
  113. static struct map *map__new(struct mmap_event *event)
  114. {
  115. struct map *self = malloc(sizeof(*self));
  116. if (self != NULL) {
  117. const char *filename = event->filename;
  118. char newfilename[PATH_MAX];
  119. int anon;
  120. if (cwd) {
  121. int n = strcommon(filename);
  122. if (n == cwdlen) {
  123. snprintf(newfilename, sizeof(newfilename),
  124. ".%s", filename + n);
  125. filename = newfilename;
  126. }
  127. }
  128. anon = is_anon_memory(filename);
  129. if (anon) {
  130. snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
  131. filename = newfilename;
  132. }
  133. self->start = event->start;
  134. self->end = event->start + event->len;
  135. self->pgoff = event->pgoff;
  136. self->dso = dsos__findnew(filename);
  137. if (self->dso == NULL)
  138. goto out_delete;
  139. if (self->dso == vdso || anon)
  140. self->map_ip = vdso__map_ip;
  141. else
  142. self->map_ip = map__map_ip;
  143. }
  144. return self;
  145. out_delete:
  146. free(self);
  147. return NULL;
  148. }
  149. static struct map *map__clone(struct map *self)
  150. {
  151. struct map *map = malloc(sizeof(*self));
  152. if (!map)
  153. return NULL;
  154. memcpy(map, self, sizeof(*self));
  155. return map;
  156. }
  157. static int map__overlap(struct map *l, struct map *r)
  158. {
  159. if (l->start > r->start) {
  160. struct map *t = l;
  161. l = r;
  162. r = t;
  163. }
  164. if (l->end > r->start)
  165. return 1;
  166. return 0;
  167. }
  168. static size_t map__fprintf(struct map *self, FILE *fp)
  169. {
  170. return fprintf(fp, " %Lx-%Lx %Lx %s\n",
  171. self->start, self->end, self->pgoff, self->dso->name);
  172. }
  173. struct thread {
  174. struct rb_node rb_node;
  175. struct list_head maps;
  176. pid_t pid;
  177. char *comm;
  178. };
  179. static struct thread *thread__new(pid_t pid)
  180. {
  181. struct thread *self = malloc(sizeof(*self));
  182. if (self != NULL) {
  183. self->pid = pid;
  184. self->comm = malloc(32);
  185. if (self->comm)
  186. snprintf(self->comm, 32, ":%d", self->pid);
  187. INIT_LIST_HEAD(&self->maps);
  188. }
  189. return self;
  190. }
  191. static unsigned int dsos__col_width,
  192. comms__col_width,
  193. threads__col_width;
  194. static int thread__set_comm(struct thread *self, const char *comm)
  195. {
  196. if (self->comm)
  197. free(self->comm);
  198. self->comm = strdup(comm);
  199. if (!self->comm)
  200. return -ENOMEM;
  201. if (!col_width_list_str && !field_sep &&
  202. (!comm_list || strlist__has_entry(comm_list, comm))) {
  203. unsigned int slen = strlen(comm);
  204. if (slen > comms__col_width) {
  205. comms__col_width = slen;
  206. threads__col_width = slen + 6;
  207. }
  208. }
  209. return 0;
  210. }
  211. static size_t thread__fprintf(struct thread *self, FILE *fp)
  212. {
  213. struct map *pos;
  214. size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
  215. list_for_each_entry(pos, &self->maps, node)
  216. ret += map__fprintf(pos, fp);
  217. return ret;
  218. }
  219. static struct rb_root threads;
  220. static struct thread *last_match;
  221. static struct thread *threads__findnew(pid_t pid)
  222. {
  223. struct rb_node **p = &threads.rb_node;
  224. struct rb_node *parent = NULL;
  225. struct thread *th;
  226. /*
  227. * Front-end cache - PID lookups come in blocks,
  228. * so most of the time we don't have to look up
  229. * the full rbtree:
  230. */
  231. if (last_match && last_match->pid == pid)
  232. return last_match;
  233. while (*p != NULL) {
  234. parent = *p;
  235. th = rb_entry(parent, struct thread, rb_node);
  236. if (th->pid == pid) {
  237. last_match = th;
  238. return th;
  239. }
  240. if (pid < th->pid)
  241. p = &(*p)->rb_left;
  242. else
  243. p = &(*p)->rb_right;
  244. }
  245. th = thread__new(pid);
  246. if (th != NULL) {
  247. rb_link_node(&th->rb_node, parent, p);
  248. rb_insert_color(&th->rb_node, &threads);
  249. last_match = th;
  250. }
  251. return th;
  252. }
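/*
 * Sketch of the lookup pattern served by threads__findnew() (pids are
 * made up): events tend to arrive in runs such as 1042, 1042, 1042,
 * 977, 977, ...  The last_match cache answers the repeats in O(1);
 * only the first occurrence of each pid walks the rbtree, and an
 * unknown pid gets a freshly allocated thread inserted in pid order.
 */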
  253. static void thread__insert_map(struct thread *self, struct map *map)
  254. {
  255. struct map *pos, *tmp;
  256. list_for_each_entry_safe(pos, tmp, &self->maps, node) {
  257. if (map__overlap(pos, map)) {
  258. if (verbose >= 2) {
  259. printf("overlapping maps:\n");
  260. map__fprintf(map, stdout);
  261. map__fprintf(pos, stdout);
  262. }
  263. if (map->start <= pos->start && map->end > pos->start)
  264. pos->start = map->end;
  265. if (map->end >= pos->end && map->start < pos->end)
  266. pos->end = map->start;
  267. if (verbose >= 2) {
  268. printf("after collision:\n");
  269. map__fprintf(pos, stdout);
  270. }
  271. if (pos->start >= pos->end) {
  272. list_del_init(&pos->node);
  273. free(pos);
  274. }
  275. }
  276. }
  277. list_add_tail(&map->node, &self->maps);
  278. }
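/*
 * Illustrative overlap case for thread__insert_map() (addresses made
 * up): with an existing map covering [0x1000, 0x3000) and a new map
 * covering [0x2000, 0x4000), the existing map is trimmed back to
 * [0x1000, 0x2000) before the new one is appended; a map trimmed down
 * to nothing (start >= end) is removed from the list and freed.
 */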
  279. static int thread__fork(struct thread *self, struct thread *parent)
  280. {
  281. struct map *map;
  282. if (self->comm)
  283. free(self->comm);
  284. self->comm = strdup(parent->comm);
  285. if (!self->comm)
  286. return -ENOMEM;
  287. list_for_each_entry(map, &parent->maps, node) {
  288. struct map *new = map__clone(map);
  289. if (!new)
  290. return -ENOMEM;
  291. thread__insert_map(self, new);
  292. }
  293. return 0;
  294. }
  295. static struct map *thread__find_map(struct thread *self, u64 ip)
  296. {
  297. struct map *pos;
  298. if (self == NULL)
  299. return NULL;
  300. list_for_each_entry(pos, &self->maps, node)
  301. if (ip >= pos->start && ip <= pos->end)
  302. return pos;
  303. return NULL;
  304. }
  305. static size_t threads__fprintf(FILE *fp)
  306. {
  307. size_t ret = 0;
  308. struct rb_node *nd;
  309. for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
  310. struct thread *pos = rb_entry(nd, struct thread, rb_node);
  311. ret += thread__fprintf(pos, fp);
  312. }
  313. return ret;
  314. }
  315. /*
  316. * histogram, sorted on item, collects counts
  317. */
  318. static struct rb_root hist;
  319. struct hist_entry {
  320. struct rb_node rb_node;
  321. struct thread *thread;
  322. struct map *map;
  323. struct dso *dso;
  324. struct symbol *sym;
  325. struct symbol *parent;
  326. u64 ip;
  327. char level;
  328. struct callchain_node callchain;
  329. struct rb_root sorted_chain;
  330. u64 count;
  331. };
  332. /*
  333. * configurable sorting bits
  334. */
  335. struct sort_entry {
  336. struct list_head list;
  337. char *header;
  338. int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
  339. int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
  340. size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
  341. unsigned int *width;
  342. bool elide;
  343. };
  344. static int64_t cmp_null(void *l, void *r)
  345. {
  346. if (!l && !r)
  347. return 0;
  348. else if (!l)
  349. return -1;
  350. else
  351. return 1;
  352. }
  353. /* --sort pid */
  354. static int64_t
  355. sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
  356. {
  357. return right->thread->pid - left->thread->pid;
  358. }
  359. static size_t
  360. sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
  361. {
  362. return repsep_fprintf(fp, "%*s:%5d", width - 6,
  363. self->thread->comm ?: "", self->thread->pid);
  364. }
  365. static struct sort_entry sort_thread = {
  366. .header = "Command: Pid",
  367. .cmp = sort__thread_cmp,
  368. .print = sort__thread_print,
  369. .width = &threads__col_width,
  370. };
  371. /* --sort comm */
  372. static int64_t
  373. sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
  374. {
  375. return right->thread->pid - left->thread->pid;
  376. }
  377. static int64_t
  378. sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
  379. {
  380. char *comm_l = left->thread->comm;
  381. char *comm_r = right->thread->comm;
  382. if (!comm_l || !comm_r)
  383. return cmp_null(comm_l, comm_r);
  384. return strcmp(comm_l, comm_r);
  385. }
  386. static size_t
  387. sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
  388. {
  389. return repsep_fprintf(fp, "%*s", width, self->thread->comm);
  390. }
  391. static struct sort_entry sort_comm = {
  392. .header = "Command",
  393. .cmp = sort__comm_cmp,
  394. .collapse = sort__comm_collapse,
  395. .print = sort__comm_print,
  396. .width = &comms__col_width,
  397. };
  398. /* --sort dso */
  399. static int64_t
  400. sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
  401. {
  402. struct dso *dso_l = left->dso;
  403. struct dso *dso_r = right->dso;
  404. if (!dso_l || !dso_r)
  405. return cmp_null(dso_l, dso_r);
  406. return strcmp(dso_l->name, dso_r->name);
  407. }
  408. static size_t
  409. sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
  410. {
  411. if (self->dso)
  412. return repsep_fprintf(fp, "%-*s", width, self->dso->name);
  413. return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
  414. }
  415. static struct sort_entry sort_dso = {
  416. .header = "Shared Object",
  417. .cmp = sort__dso_cmp,
  418. .print = sort__dso_print,
  419. .width = &dsos__col_width,
  420. };
  421. /* --sort symbol */
  422. static int64_t
  423. sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
  424. {
  425. u64 ip_l, ip_r;
  426. if (left->sym == right->sym)
  427. return 0;
  428. ip_l = left->sym ? left->sym->start : left->ip;
  429. ip_r = right->sym ? right->sym->start : right->ip;
  430. return (int64_t)(ip_r - ip_l);
  431. }
  432. static size_t
  433. sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
  434. {
  435. size_t ret = 0;
  436. if (verbose)
  437. ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip,
  438. dso__symtab_origin(self->dso));
  439. ret += repsep_fprintf(fp, "[%c] ", self->level);
  440. if (self->sym) {
  441. ret += repsep_fprintf(fp, "%s", self->sym->name);
  442. if (self->sym->module)
  443. ret += repsep_fprintf(fp, "\t[%s]",
  444. self->sym->module->name);
  445. } else {
  446. ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
  447. }
  448. return ret;
  449. }
  450. static struct sort_entry sort_sym = {
  451. .header = "Symbol",
  452. .cmp = sort__sym_cmp,
  453. .print = sort__sym_print,
  454. };
  455. /* --sort parent */
  456. static int64_t
  457. sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
  458. {
  459. struct symbol *sym_l = left->parent;
  460. struct symbol *sym_r = right->parent;
  461. if (!sym_l || !sym_r)
  462. return cmp_null(sym_l, sym_r);
  463. return strcmp(sym_l->name, sym_r->name);
  464. }
  465. static size_t
  466. sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
  467. {
  468. return repsep_fprintf(fp, "%-*s", width,
  469. self->parent ? self->parent->name : "[other]");
  470. }
  471. static unsigned int parent_symbol__col_width;
  472. static struct sort_entry sort_parent = {
  473. .header = "Parent symbol",
  474. .cmp = sort__parent_cmp,
  475. .print = sort__parent_print,
  476. .width = &parent_symbol__col_width,
  477. };
  478. static int sort__need_collapse = 0;
  479. static int sort__has_parent = 0;
  480. struct sort_dimension {
  481. char *name;
  482. struct sort_entry *entry;
  483. int taken;
  484. };
  485. static struct sort_dimension sort_dimensions[] = {
  486. { .name = "pid", .entry = &sort_thread, },
  487. { .name = "comm", .entry = &sort_comm, },
  488. { .name = "dso", .entry = &sort_dso, },
  489. { .name = "symbol", .entry = &sort_sym, },
  490. { .name = "parent", .entry = &sort_parent, },
  491. };
  492. static LIST_HEAD(hist_entry__sort_list);
  493. static int sort_dimension__add(char *tok)
  494. {
  495. unsigned int i;
  496. for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
  497. struct sort_dimension *sd = &sort_dimensions[i];
  498. if (sd->taken)
  499. continue;
  500. if (strncasecmp(tok, sd->name, strlen(tok)))
  501. continue;
  502. if (sd->entry->collapse)
  503. sort__need_collapse = 1;
  504. if (sd->entry == &sort_parent) {
  505. int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
  506. if (ret) {
  507. char err[BUFSIZ];
  508. regerror(ret, &parent_regex, err, sizeof(err));
  509. fprintf(stderr, "Invalid regex: %s\n%s",
  510. parent_pattern, err);
  511. exit(-1);
  512. }
  513. sort__has_parent = 1;
  514. }
  515. list_add_tail(&sd->entry->list, &hist_entry__sort_list);
  516. sd->taken = 1;
  517. return 0;
  518. }
  519. return -ESRCH;
  520. }
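/*
 * How the default --sort string maps onto the machinery above: the
 * default "comm,dso,symbol" adds sort_comm, sort_dso and sort_sym to
 * hist_entry__sort_list in that order.  Note that sort_comm's ->cmp
 * still distinguishes entries by thread pid while its ->collapse
 * merges them by command string; that difference is what sets
 * sort__need_collapse and triggers the extra collapse pass before
 * output.
 */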
  521. static int64_t
  522. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  523. {
  524. struct sort_entry *se;
  525. int64_t cmp = 0;
  526. list_for_each_entry(se, &hist_entry__sort_list, list) {
  527. cmp = se->cmp(left, right);
  528. if (cmp)
  529. break;
  530. }
  531. return cmp;
  532. }
  533. static int64_t
  534. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  535. {
  536. struct sort_entry *se;
  537. int64_t cmp = 0;
  538. list_for_each_entry(se, &hist_entry__sort_list, list) {
  539. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  540. f = se->collapse ?: se->cmp;
  541. cmp = f(left, right);
  542. if (cmp)
  543. break;
  544. }
  545. return cmp;
  546. }
  547. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
  548. {
  549. int i;
  550. size_t ret = 0;
  551. ret += fprintf(fp, "%s", " ");
  552. for (i = 0; i < depth; i++)
  553. if (depth_mask & (1 << i))
  554. ret += fprintf(fp, "| ");
  555. else
  556. ret += fprintf(fp, " ");
  557. ret += fprintf(fp, "\n");
  558. return ret;
  559. }
  560. static size_t
  561. ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
  562. int depth_mask, int count, u64 total_samples,
  563. int hits)
  564. {
  565. int i;
  566. size_t ret = 0;
  567. ret += fprintf(fp, "%s", " ");
  568. for (i = 0; i < depth; i++) {
  569. if (depth_mask & (1 << i))
  570. ret += fprintf(fp, "|");
  571. else
  572. ret += fprintf(fp, " ");
  573. if (!count && i == depth - 1) {
  574. double percent;
  575. percent = hits * 100.0 / total_samples;
  576. ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
  577. } else
  578. ret += fprintf(fp, "%s", " ");
  579. }
  580. if (chain->sym)
  581. ret += fprintf(fp, "%s\n", chain->sym->name);
  582. else
  583. ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
  584. return ret;
  585. }
  586. static struct symbol *rem_sq_bracket;
  587. static struct callchain_list rem_hits;
  588. static void init_rem_hits(void)
  589. {
  590. rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
  591. if (!rem_sq_bracket) {
  592. fprintf(stderr, "Not enough memory to display remaining hits\n");
  593. return;
  594. }
  595. strcpy(rem_sq_bracket->name, "[...]");
  596. rem_hits.sym = rem_sq_bracket;
  597. }
  598. static size_t
  599. callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
  600. u64 total_samples, int depth, int depth_mask)
  601. {
  602. struct rb_node *node, *next;
  603. struct callchain_node *child;
  604. struct callchain_list *chain;
  605. int new_depth_mask = depth_mask;
  606. u64 new_total;
  607. u64 remaining;
  608. size_t ret = 0;
  609. int i;
  610. if (callchain_param.mode == CHAIN_GRAPH_REL)
  611. new_total = self->children_hit;
  612. else
  613. new_total = total_samples;
  614. remaining = new_total;
  615. node = rb_first(&self->rb_root);
  616. while (node) {
  617. u64 cumul;
  618. child = rb_entry(node, struct callchain_node, rb_node);
  619. cumul = cumul_hits(child);
  620. remaining -= cumul;
  621. /*
  622. * The depth mask manages the output of pipes that show
  623. * the depth. We don't want to keep the pipes of the current
  624. * level for the last child of this depth.
  625. * Except when we have remaining filtered hits; they will
  626. * supersede the last child.
  627. */
  628. next = rb_next(node);
  629. if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
  630. new_depth_mask &= ~(1 << (depth - 1));
  631. /*
  632. * But we keep the older depth mask for the line separator
  633. * to keep the level link until we reach the last child
  634. */
  635. ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
  636. i = 0;
  637. list_for_each_entry(chain, &child->val, list) {
  638. if (chain->ip >= PERF_CONTEXT_MAX)
  639. continue;
  640. ret += ipchain__fprintf_graph(fp, chain, depth,
  641. new_depth_mask, i++,
  642. new_total,
  643. cumul);
  644. }
  645. ret += callchain__fprintf_graph(fp, child, new_total,
  646. depth + 1,
  647. new_depth_mask | (1 << depth));
  648. node = next;
  649. }
  650. if (callchain_param.mode == CHAIN_GRAPH_REL &&
  651. remaining && remaining != new_total) {
  652. if (!rem_sq_bracket)
  653. return ret;
  654. new_depth_mask &= ~(1 << (depth - 1));
  655. ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
  656. new_depth_mask, 0, new_total,
  657. remaining);
  658. }
  659. return ret;
  660. }
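/*
 * Rough shape of the graph output produced by the helpers above
 * (rendering is illustrative, not captured from a real report):
 *
 *     |
 *     --42.00%-- some_symbol
 *               |
 *               --23.00%-- callee_symbol
 *
 * The depth_mask bits decide which columns still draw a '|', and in
 * fractal (CHAIN_GRAPH_REL) mode the synthetic "[...]" entry from
 * rem_hits accounts for children filtered out by the minimum-percent
 * threshold.
 */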
  661. static size_t
  662. callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
  663. u64 total_samples)
  664. {
  665. struct callchain_list *chain;
  666. size_t ret = 0;
  667. if (!self)
  668. return 0;
  669. ret += callchain__fprintf_flat(fp, self->parent, total_samples);
  670. list_for_each_entry(chain, &self->val, list) {
  671. if (chain->ip >= PERF_CONTEXT_MAX)
  672. continue;
  673. if (chain->sym)
  674. ret += fprintf(fp, " %s\n", chain->sym->name);
  675. else
  676. ret += fprintf(fp, " %p\n",
  677. (void *)(long)chain->ip);
  678. }
  679. return ret;
  680. }
  681. static size_t
  682. hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
  683. u64 total_samples)
  684. {
  685. struct rb_node *rb_node;
  686. struct callchain_node *chain;
  687. size_t ret = 0;
  688. rb_node = rb_first(&self->sorted_chain);
  689. while (rb_node) {
  690. double percent;
  691. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  692. percent = chain->hit * 100.0 / total_samples;
  693. switch (callchain_param.mode) {
  694. case CHAIN_FLAT:
  695. ret += percent_color_fprintf(fp, " %6.2f%%\n",
  696. percent);
  697. ret += callchain__fprintf_flat(fp, chain, total_samples);
  698. break;
  699. case CHAIN_GRAPH_ABS: /* Fall through */
  700. case CHAIN_GRAPH_REL:
  701. ret += callchain__fprintf_graph(fp, chain,
  702. total_samples, 1, 1);
  703. default:
  704. break;
  705. }
  706. ret += fprintf(fp, "\n");
  707. rb_node = rb_next(rb_node);
  708. }
  709. return ret;
  710. }
  711. static size_t
  712. hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
  713. {
  714. struct sort_entry *se;
  715. size_t ret;
  716. if (exclude_other && !self->parent)
  717. return 0;
  718. if (total_samples)
  719. ret = percent_color_fprintf(fp,
  720. field_sep ? "%.2f" : " %6.2f%%",
  721. (self->count * 100.0) / total_samples);
  722. else
  723. ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
  724. if (show_nr_samples) {
  725. if (field_sep)
  726. fprintf(fp, "%c%lld", *field_sep, self->count);
  727. else
  728. fprintf(fp, "%11lld", self->count);
  729. }
  730. list_for_each_entry(se, &hist_entry__sort_list, list) {
  731. if (se->elide)
  732. continue;
  733. fprintf(fp, "%s", field_sep ?: " ");
  734. ret += se->print(fp, self, se->width ? *se->width : 0);
  735. }
  736. ret += fprintf(fp, "\n");
  737. if (callchain)
  738. hist_entry_callchain__fprintf(fp, self, total_samples);
  739. return ret;
  740. }
  741. /*
  742. * Track the widest DSO name seen, so the "Shared Object" column can be sized.
  743. */
  744. static void dso__calc_col_width(struct dso *self)
  745. {
  746. if (!col_width_list_str && !field_sep &&
  747. (!dso_list || strlist__has_entry(dso_list, self->name))) {
  748. unsigned int slen = strlen(self->name);
  749. if (slen > dsos__col_width)
  750. dsos__col_width = slen;
  751. }
  752. self->slen_calculated = 1;
  753. }
  754. static struct symbol *
  755. resolve_symbol(struct thread *thread, struct map **mapp,
  756. struct dso **dsop, u64 *ipp)
  757. {
  758. struct dso *dso = dsop ? *dsop : NULL;
  759. struct map *map = mapp ? *mapp : NULL;
  760. u64 ip = *ipp;
  761. if (!thread)
  762. return NULL;
  763. if (dso)
  764. goto got_dso;
  765. if (map)
  766. goto got_map;
  767. map = thread__find_map(thread, ip);
  768. if (map != NULL) {
  769. /*
  770. * We have to do this here as we may have a dso
  771. * with no symbol hit that has a name longer than
  772. * the ones with symbols sampled.
  773. */
  774. if (!sort_dso.elide && !map->dso->slen_calculated)
  775. dso__calc_col_width(map->dso);
  776. if (mapp)
  777. *mapp = map;
  778. got_map:
  779. ip = map->map_ip(map, ip);
  780. dso = map->dso;
  781. } else {
  782. /*
  783. * If this is outside of all known maps,
  784. * and is a negative address, try to look it
  785. * up in the kernel dso, as it might be a
  786. * vsyscall (which executes in user-mode):
  787. */
  788. if ((long long)ip < 0)
  789. dso = kernel_dso;
  790. }
  791. dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
  792. dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
  793. *ipp = ip;
  794. if (dsop)
  795. *dsop = dso;
  796. if (!dso)
  797. return NULL;
  798. got_dso:
  799. return dso->find_symbol(dso, ip);
  800. }
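/*
 * Summary of the resolution pipeline above: a raw sample ip is matched
 * against the thread's map list, translated to a dso-relative address
 * via map->map_ip(), and then looked up in that dso's symbol table.
 * An ip outside every known map that looks like a kernel address
 * (negative when treated as signed) is retried against kernel_dso,
 * which catches vsyscall-style hits executed in user mode.
 */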
  801. static int call__match(struct symbol *sym)
  802. {
  803. if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
  804. return 1;
  805. return 0;
  806. }
  807. static struct symbol **
  808. resolve_callchain(struct thread *thread, struct map *map __used,
  809. struct ip_callchain *chain, struct hist_entry *entry)
  810. {
  811. u64 context = PERF_CONTEXT_MAX;
  812. struct symbol **syms = NULL;
  813. unsigned int i;
  814. if (callchain) {
  815. syms = calloc(chain->nr, sizeof(*syms));
  816. if (!syms) {
  817. fprintf(stderr, "Can't allocate memory for symbols\n");
  818. exit(-1);
  819. }
  820. }
  821. for (i = 0; i < chain->nr; i++) {
  822. u64 ip = chain->ips[i];
  823. struct dso *dso = NULL;
  824. struct symbol *sym;
  825. if (ip >= PERF_CONTEXT_MAX) {
  826. context = ip;
  827. continue;
  828. }
  829. switch (context) {
  830. case PERF_CONTEXT_HV:
  831. dso = hypervisor_dso;
  832. break;
  833. case PERF_CONTEXT_KERNEL:
  834. dso = kernel_dso;
  835. break;
  836. default:
  837. break;
  838. }
  839. sym = resolve_symbol(thread, NULL, &dso, &ip);
  840. if (sym) {
  841. if (sort__has_parent && call__match(sym) &&
  842. !entry->parent)
  843. entry->parent = sym;
  844. if (!callchain)
  845. break;
  846. syms[i] = sym;
  847. }
  848. }
  849. return syms;
  850. }
  851. /*
  852. * collect histogram counts
  853. */
  854. static int
  855. hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
  856. struct symbol *sym, u64 ip, struct ip_callchain *chain,
  857. char level, u64 count)
  858. {
  859. struct rb_node **p = &hist.rb_node;
  860. struct rb_node *parent = NULL;
  861. struct hist_entry *he;
  862. struct symbol **syms = NULL;
  863. struct hist_entry entry = {
  864. .thread = thread,
  865. .map = map,
  866. .dso = dso,
  867. .sym = sym,
  868. .ip = ip,
  869. .level = level,
  870. .count = count,
  871. .parent = NULL,
  872. .sorted_chain = RB_ROOT
  873. };
  874. int cmp;
  875. if ((sort__has_parent || callchain) && chain)
  876. syms = resolve_callchain(thread, map, chain, &entry);
  877. while (*p != NULL) {
  878. parent = *p;
  879. he = rb_entry(parent, struct hist_entry, rb_node);
  880. cmp = hist_entry__cmp(&entry, he);
  881. if (!cmp) {
  882. he->count += count;
  883. if (callchain) {
  884. append_chain(&he->callchain, chain, syms);
  885. free(syms);
  886. }
  887. return 0;
  888. }
  889. if (cmp < 0)
  890. p = &(*p)->rb_left;
  891. else
  892. p = &(*p)->rb_right;
  893. }
  894. he = malloc(sizeof(*he));
  895. if (!he)
  896. return -ENOMEM;
  897. *he = entry;
  898. if (callchain) {
  899. callchain_init(&he->callchain);
  900. append_chain(&he->callchain, chain, syms);
  901. free(syms);
  902. }
  903. rb_link_node(&he->rb_node, parent, p);
  904. rb_insert_color(&he->rb_node, &hist);
  905. return 0;
  906. }
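/*
 * Accumulation note for hist_entry__add(): because the rbtree is keyed
 * by hist_entry__cmp(), samples that agree on all active sort keys
 * land in one entry whose count grows by each sample's period; for
 * example, two samples with periods 1 and 3 on the same key show up
 * as a single entry with count 4.  Only a genuinely new key allocates
 * a new hist_entry.
 */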
  907. static void hist_entry__free(struct hist_entry *he)
  908. {
  909. free(he);
  910. }
  911. /*
  912. * collapse the histogram
  913. */
  914. static struct rb_root collapse_hists;
  915. static void collapse__insert_entry(struct hist_entry *he)
  916. {
  917. struct rb_node **p = &collapse_hists.rb_node;
  918. struct rb_node *parent = NULL;
  919. struct hist_entry *iter;
  920. int64_t cmp;
  921. while (*p != NULL) {
  922. parent = *p;
  923. iter = rb_entry(parent, struct hist_entry, rb_node);
  924. cmp = hist_entry__collapse(iter, he);
  925. if (!cmp) {
  926. iter->count += he->count;
  927. hist_entry__free(he);
  928. return;
  929. }
  930. if (cmp < 0)
  931. p = &(*p)->rb_left;
  932. else
  933. p = &(*p)->rb_right;
  934. }
  935. rb_link_node(&he->rb_node, parent, p);
  936. rb_insert_color(&he->rb_node, &collapse_hists);
  937. }
  938. static void collapse__resort(void)
  939. {
  940. struct rb_node *next;
  941. struct hist_entry *n;
  942. if (!sort__need_collapse)
  943. return;
  944. next = rb_first(&hist);
  945. while (next) {
  946. n = rb_entry(next, struct hist_entry, rb_node);
  947. next = rb_next(&n->rb_node);
  948. rb_erase(&n->rb_node, &hist);
  949. collapse__insert_entry(n);
  950. }
  951. }
  952. /*
  953. * reverse the map, sort on count.
  954. */
  955. static struct rb_root output_hists;
  956. static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
  957. {
  958. struct rb_node **p = &output_hists.rb_node;
  959. struct rb_node *parent = NULL;
  960. struct hist_entry *iter;
  961. if (callchain)
  962. callchain_param.sort(&he->sorted_chain, &he->callchain,
  963. min_callchain_hits, &callchain_param);
  964. while (*p != NULL) {
  965. parent = *p;
  966. iter = rb_entry(parent, struct hist_entry, rb_node);
  967. if (he->count > iter->count)
  968. p = &(*p)->rb_left;
  969. else
  970. p = &(*p)->rb_right;
  971. }
  972. rb_link_node(&he->rb_node, parent, p);
  973. rb_insert_color(&he->rb_node, &output_hists);
  974. }
  975. static void output__resort(u64 total_samples)
  976. {
  977. struct rb_node *next;
  978. struct hist_entry *n;
  979. struct rb_root *tree = &hist;
  980. u64 min_callchain_hits;
  981. min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
  982. if (sort__need_collapse)
  983. tree = &collapse_hists;
  984. next = rb_first(tree);
  985. while (next) {
  986. n = rb_entry(next, struct hist_entry, rb_node);
  987. next = rb_next(&n->rb_node);
  988. rb_erase(&n->rb_node, tree);
  989. output__insert_entry(n, min_callchain_hits);
  990. }
  991. }
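/*
 * Pipeline recap: samples are first accumulated in 'hist' (keyed by
 * the full sort key), optionally merged into 'collapse_hists' when a
 * sort key provides a ->collapse method (e.g. comm), and finally
 * re-inserted into 'output_hists' in descending count order, which is
 * the order output__fprintf() walks for display.
 */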
  992. static size_t output__fprintf(FILE *fp, u64 total_samples)
  993. {
  994. struct hist_entry *pos;
  995. struct sort_entry *se;
  996. struct rb_node *nd;
  997. size_t ret = 0;
  998. unsigned int width;
  999. char *col_width = col_width_list_str;
  1000. int raw_printing_style;
  1001. raw_printing_style = !strcmp(pretty_printing_style, "raw");
  1002. init_rem_hits();
  1003. fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
  1004. fprintf(fp, "#\n");
  1005. fprintf(fp, "# Overhead");
  1006. if (show_nr_samples) {
  1007. if (field_sep)
  1008. fprintf(fp, "%cSamples", *field_sep);
  1009. else
  1010. fputs(" Samples ", fp);
  1011. }
  1012. list_for_each_entry(se, &hist_entry__sort_list, list) {
  1013. if (se->elide)
  1014. continue;
  1015. if (field_sep) {
  1016. fprintf(fp, "%c%s", *field_sep, se->header);
  1017. continue;
  1018. }
  1019. width = strlen(se->header);
  1020. if (se->width) {
  1021. if (col_width_list_str) {
  1022. if (col_width) {
  1023. *se->width = atoi(col_width);
  1024. col_width = strchr(col_width, ',');
  1025. if (col_width)
  1026. ++col_width;
  1027. }
  1028. }
  1029. width = *se->width = max(*se->width, width);
  1030. }
  1031. fprintf(fp, " %*s", width, se->header);
  1032. }
  1033. fprintf(fp, "\n");
  1034. if (field_sep)
  1035. goto print_entries;
  1036. fprintf(fp, "# ........");
  1037. if (show_nr_samples)
  1038. fprintf(fp, " ..........");
  1039. list_for_each_entry(se, &hist_entry__sort_list, list) {
  1040. unsigned int i;
  1041. if (se->elide)
  1042. continue;
  1043. fprintf(fp, " ");
  1044. if (se->width)
  1045. width = *se->width;
  1046. else
  1047. width = strlen(se->header);
  1048. for (i = 0; i < width; i++)
  1049. fprintf(fp, ".");
  1050. }
  1051. fprintf(fp, "\n");
  1052. fprintf(fp, "#\n");
  1053. print_entries:
  1054. for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
  1055. pos = rb_entry(nd, struct hist_entry, rb_node);
  1056. ret += hist_entry__fprintf(fp, pos, total_samples);
  1057. }
  1058. if (sort_order == default_sort_order &&
  1059. parent_pattern == default_parent_pattern) {
  1060. fprintf(fp, "#\n");
  1061. fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
  1062. fprintf(fp, "#\n");
  1063. }
  1064. fprintf(fp, "\n");
  1065. free(rem_sq_bracket);
  1066. if (show_threads)
  1067. perf_read_values_display(fp, &show_threads_values,
  1068. raw_printing_style);
  1069. return ret;
  1070. }
  1071. static void register_idle_thread(void)
  1072. {
  1073. struct thread *thread = threads__findnew(0);
  1074. if (thread == NULL ||
  1075. thread__set_comm(thread, "[idle]")) {
  1076. fprintf(stderr, "problem inserting idle task.\n");
  1077. exit(-1);
  1078. }
  1079. }
  1080. static unsigned long total = 0,
  1081. total_mmap = 0,
  1082. total_comm = 0,
  1083. total_fork = 0,
  1084. total_unknown = 0,
  1085. total_lost = 0;
  1086. static int validate_chain(struct ip_callchain *chain, event_t *event)
  1087. {
  1088. unsigned int chain_size;
  1089. chain_size = event->header.size;
  1090. chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
  1091. if (chain->nr*sizeof(u64) > chain_size)
  1092. return -1;
  1093. return 0;
  1094. }
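/*
 * Size-check illustration for validate_chain() (numbers are
 * hypothetical): if header.size is 72 bytes and the fixed fields up to
 * __more_data take 40 bytes, chain_size is 32 bytes, so any chain
 * claiming nr * sizeof(u64) > 32, i.e. nr > 4, fails the check and the
 * sample is skipped.
 */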
  1095. static int
  1096. process_sample_event(event_t *event, unsigned long offset, unsigned long head)
  1097. {
  1098. char level;
  1099. int show = 0;
  1100. struct dso *dso = NULL;
  1101. struct thread *thread = threads__findnew(event->ip.pid);
  1102. u64 ip = event->ip.ip;
  1103. u64 period = 1;
  1104. struct map *map = NULL;
  1105. void *more_data = event->ip.__more_data;
  1106. struct ip_callchain *chain = NULL;
  1107. int cpumode;
  1108. if (sample_type & PERF_SAMPLE_PERIOD) {
  1109. period = *(u64 *)more_data;
  1110. more_data += sizeof(u64);
  1111. }
  1112. dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
  1113. (void *)(offset + head),
  1114. (void *)(long)(event->header.size),
  1115. event->header.misc,
  1116. event->ip.pid,
  1117. (void *)(long)ip,
  1118. (long long)period);
  1119. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  1120. unsigned int i;
  1121. chain = (void *)more_data;
  1122. dprintf("... chain: nr:%Lu\n", chain->nr);
  1123. if (validate_chain(chain, event) < 0) {
  1124. eprintf("call-chain problem with event, skipping it.\n");
  1125. return 0;
  1126. }
  1127. if (dump_trace) {
  1128. for (i = 0; i < chain->nr; i++)
  1129. dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
  1130. }
  1131. }
  1132. if (thread == NULL) {
  1133. eprintf("problem processing %d event, skipping it.\n",
  1134. event->header.type);
  1135. return -1;
  1136. }
  1137. dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
  1138. if (comm_list && !strlist__has_entry(comm_list, thread->comm))
  1139. return 0;
  1140. cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
  1141. if (cpumode == PERF_EVENT_MISC_KERNEL) {
  1142. show = SHOW_KERNEL;
  1143. level = 'k';
  1144. dso = kernel_dso;
  1145. dprintf(" ...... dso: %s\n", dso->name);
  1146. } else if (cpumode == PERF_EVENT_MISC_USER) {
  1147. show = SHOW_USER;
  1148. level = '.';
  1149. } else {
  1150. show = SHOW_HV;
  1151. level = 'H';
  1152. dso = hypervisor_dso;
  1153. dprintf(" ...... dso: [hypervisor]\n");
  1154. }
  1155. if (show & show_mask) {
  1156. struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
  1157. if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
  1158. return 0;
  1159. if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
  1160. return 0;
  1161. if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
  1162. eprintf("problem incrementing symbol count, skipping event\n");
  1163. return -1;
  1164. }
  1165. }
  1166. total += period;
  1167. return 0;
  1168. }
  1169. static int
  1170. process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
  1171. {
  1172. struct thread *thread = threads__findnew(event->mmap.pid);
  1173. struct map *map = map__new(&event->mmap);
  1174. dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
  1175. (void *)(offset + head),
  1176. (void *)(long)(event->header.size),
  1177. event->mmap.pid,
  1178. (void *)(long)event->mmap.start,
  1179. (void *)(long)event->mmap.len,
  1180. (void *)(long)event->mmap.pgoff,
  1181. event->mmap.filename);
  1182. if (thread == NULL || map == NULL) {
  1183. dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
  1184. return 0;
  1185. }
  1186. thread__insert_map(thread, map);
  1187. total_mmap++;
  1188. return 0;
  1189. }
  1190. static int
  1191. process_comm_event(event_t *event, unsigned long offset, unsigned long head)
  1192. {
  1193. struct thread *thread = threads__findnew(event->comm.pid);
  1194. dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
  1195. (void *)(offset + head),
  1196. (void *)(long)(event->header.size),
  1197. event->comm.comm, event->comm.pid);
  1198. if (thread == NULL ||
  1199. thread__set_comm(thread, event->comm.comm)) {
  1200. dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
  1201. return -1;
  1202. }
  1203. total_comm++;
  1204. return 0;
  1205. }
  1206. static int
  1207. process_task_event(event_t *event, unsigned long offset, unsigned long head)
  1208. {
  1209. struct thread *thread = threads__findnew(event->fork.pid);
  1210. struct thread *parent = threads__findnew(event->fork.ppid);
  1211. dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
  1212. (void *)(offset + head),
  1213. (void *)(long)(event->header.size),
  1214. event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
  1215. event->fork.pid, event->fork.tid,
  1216. event->fork.ppid, event->fork.ptid);
  1217. /*
  1218. * A thread clone will have the same PID for both
  1219. * parent and child.
  1220. */
  1221. if (thread == parent)
  1222. return 0;
  1223. if (event->header.type == PERF_EVENT_EXIT)
  1224. return 0;
  1225. if (!thread || !parent || thread__fork(thread, parent)) {
  1226. dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
  1227. return -1;
  1228. }
  1229. total_fork++;
  1230. return 0;
  1231. }
  1232. static int
  1233. process_lost_event(event_t *event, unsigned long offset, unsigned long head)
  1234. {
  1235. dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
  1236. (void *)(offset + head),
  1237. (void *)(long)(event->header.size),
  1238. event->lost.id,
  1239. event->lost.lost);
  1240. total_lost += event->lost.lost;
  1241. return 0;
  1242. }
  1243. static void trace_event(event_t *event)
  1244. {
  1245. unsigned char *raw_event = (void *)event;
  1246. char *color = PERF_COLOR_BLUE;
  1247. int i, j;
  1248. if (!dump_trace)
  1249. return;
  1250. dprintf(".");
  1251. cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
  1252. for (i = 0; i < event->header.size; i++) {
  1253. if ((i & 15) == 0) {
  1254. dprintf(".");
  1255. cdprintf(" %04x: ", i);
  1256. }
  1257. cdprintf(" %02x", raw_event[i]);
  1258. if (((i & 15) == 15) || i == event->header.size-1) {
  1259. cdprintf(" ");
  1260. for (j = 0; j < 15-(i & 15); j++)
  1261. cdprintf(" ");
  1262. for (j = 0; j < (i & 15); j++) {
  1263. if (isprint(raw_event[i-15+j]))
  1264. cdprintf("%c", raw_event[i-15+j]);
  1265. else
  1266. cdprintf(".");
  1267. }
  1268. cdprintf("\n");
  1269. }
  1270. }
  1271. dprintf(".\n");
  1272. }
  1273. static struct perf_header *header;
  1274. static struct perf_counter_attr *perf_header__find_attr(u64 id)
  1275. {
  1276. int i;
  1277. for (i = 0; i < header->attrs; i++) {
  1278. struct perf_header_attr *attr = header->attr[i];
  1279. int j;
  1280. for (j = 0; j < attr->ids; j++) {
  1281. if (attr->id[j] == id)
  1282. return &attr->attr;
  1283. }
  1284. }
  1285. return NULL;
  1286. }
  1287. static int
  1288. process_read_event(event_t *event, unsigned long offset, unsigned long head)
  1289. {
  1290. struct perf_counter_attr *attr = perf_header__find_attr(event->read.id);
  1291. if (show_threads) {
  1292. char *name = attr ? __event_name(attr->type, attr->config)
  1293. : "unknown";
  1294. perf_read_values_add_value(&show_threads_values,
  1295. event->read.pid, event->read.tid,
  1296. event->read.id,
  1297. name,
  1298. event->read.value);
  1299. }
  1300. dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
  1301. (void *)(offset + head),
  1302. (void *)(long)(event->header.size),
  1303. event->read.pid,
  1304. event->read.tid,
  1305. attr ? __event_name(attr->type, attr->config)
  1306. : "FAIL",
  1307. event->read.value);
  1308. return 0;
  1309. }
  1310. static int
  1311. process_event(event_t *event, unsigned long offset, unsigned long head)
  1312. {
  1313. trace_event(event);
  1314. switch (event->header.type) {
  1315. case PERF_EVENT_SAMPLE:
  1316. return process_sample_event(event, offset, head);
  1317. case PERF_EVENT_MMAP:
  1318. return process_mmap_event(event, offset, head);
  1319. case PERF_EVENT_COMM:
  1320. return process_comm_event(event, offset, head);
  1321. case PERF_EVENT_FORK:
  1322. case PERF_EVENT_EXIT:
  1323. return process_task_event(event, offset, head);
  1324. case PERF_EVENT_LOST:
  1325. return process_lost_event(event, offset, head);
  1326. case PERF_EVENT_READ:
  1327. return process_read_event(event, offset, head);
  1328. /*
  1329. * We don't process them right now, but they are fine:
  1330. */
  1331. case PERF_EVENT_THROTTLE:
  1332. case PERF_EVENT_UNTHROTTLE:
  1333. return 0;
  1334. default:
  1335. return -1;
  1336. }
  1337. return 0;
  1338. }
  1339. static u64 perf_header__sample_type(void)
  1340. {
  1341. u64 sample_type = 0;
  1342. int i;
  1343. for (i = 0; i < header->attrs; i++) {
  1344. struct perf_header_attr *attr = header->attr[i];
  1345. if (!sample_type)
  1346. sample_type = attr->attr.sample_type;
  1347. else if (sample_type != attr->attr.sample_type)
  1348. die("non matching sample_type");
  1349. }
  1350. return sample_type;
  1351. }
  1352. static int __cmd_report(void)
  1353. {
  1354. int ret, rc = EXIT_FAILURE;
  1355. unsigned long offset = 0;
  1356. unsigned long head, shift;
  1357. struct stat stat;
  1358. event_t *event;
  1359. uint32_t size;
  1360. char *buf;
  1361. register_idle_thread();
  1362. if (show_threads)
  1363. perf_read_values_init(&show_threads_values);
  1364. input = open(input_name, O_RDONLY);
  1365. if (input < 0) {
  1366. fprintf(stderr, " failed to open file: %s", input_name);
  1367. if (!strcmp(input_name, "perf.data"))
  1368. fprintf(stderr, " (try 'perf record' first)");
  1369. fprintf(stderr, "\n");
  1370. exit(-1);
  1371. }
  1372. ret = fstat(input, &stat);
  1373. if (ret < 0) {
  1374. perror("failed to stat file");
  1375. exit(-1);
  1376. }
  1377. if (!stat.st_size) {
  1378. fprintf(stderr, "zero-sized file, nothing to do!\n");
  1379. exit(0);
  1380. }
  1381. header = perf_header__read(input);
  1382. head = header->data_offset;
  1383. sample_type = perf_header__sample_type();
  1384. if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
  1385. if (sort__has_parent) {
  1386. fprintf(stderr, "selected --sort parent, but no"
  1387. " callchain data. Did you call"
  1388. " perf record without -g?\n");
  1389. exit(-1);
  1390. }
  1391. if (callchain) {
  1392. fprintf(stderr, "selected -c but no callchain data."
  1393. " Did you call perf record without"
  1394. " -g?\n");
  1395. exit(-1);
  1396. }
  1397. } else if (callchain_param.mode != CHAIN_NONE && !callchain) {
  1398. callchain = 1;
  1399. if (register_callchain_param(&callchain_param) < 0) {
  1400. fprintf(stderr, "Can't register callchain"
  1401. " params\n");
  1402. exit(-1);
  1403. }
  1404. }
  1405. if (load_kernel() < 0) {
  1406. perror("failed to load kernel symbols");
  1407. return EXIT_FAILURE;
  1408. }
  1409. if (!full_paths) {
  1410. if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
  1411. perror("failed to get the current directory");
  1412. return EXIT_FAILURE;
  1413. }
  1414. cwdlen = strlen(cwd);
  1415. } else {
  1416. cwd = NULL;
  1417. cwdlen = 0;
  1418. }
  1419. shift = page_size * (head / page_size);
  1420. offset += shift;
  1421. head -= shift;
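/*
 * Alignment step above, with illustrative numbers: if the data section
 * starts at head = 0x1234 and page_size = 0x1000, then shift = 0x1000,
 * the window is mmap'ed at file offset 0x1000, and head becomes 0x234
 * within that window.  The same shift/remap dance is repeated below
 * whenever an event would cross the end of the mmap_window-sized view.
 */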
  1422. remap:
  1423. buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
  1424. MAP_SHARED, input, offset);
  1425. if (buf == MAP_FAILED) {
  1426. perror("failed to mmap file");
  1427. exit(-1);
  1428. }
  1429. more:
  1430. event = (event_t *)(buf + head);
  1431. size = event->header.size;
  1432. if (!size)
  1433. size = 8;
  1434. if (head + event->header.size >= page_size * mmap_window) {
  1435. int ret;
  1436. shift = page_size * (head / page_size);
  1437. ret = munmap(buf, page_size * mmap_window);
  1438. assert(ret == 0);
  1439. offset += shift;
  1440. head -= shift;
  1441. goto remap;
  1442. }
  1443. size = event->header.size;
  1444. dprintf("\n%p [%p]: event: %d\n",
  1445. (void *)(offset + head),
  1446. (void *)(long)event->header.size,
  1447. event->header.type);
  1448. if (!size || process_event(event, offset, head) < 0) {
  1449. dprintf("%p [%p]: skipping unknown header type: %d\n",
  1450. (void *)(offset + head),
  1451. (void *)(long)(event->header.size),
  1452. event->header.type);
  1453. total_unknown++;
  1454. /*
  1455. * assume we lost track of the stream, check alignment, and
  1456. * advance by a single u64 in the hope of catching up again 'soon'.
  1457. */
  1458. if (unlikely(head & 7))
  1459. head &= ~7ULL;
  1460. size = 8;
  1461. }
  1462. head += size;
  1463. if (offset + head >= header->data_offset + header->data_size)
  1464. goto done;
  1465. if (offset + head < (unsigned long)stat.st_size)
  1466. goto more;
  1467. done:
  1468. rc = EXIT_SUCCESS;
  1469. close(input);
  1470. dprintf(" IP events: %10ld\n", total);
  1471. dprintf(" mmap events: %10ld\n", total_mmap);
  1472. dprintf(" comm events: %10ld\n", total_comm);
  1473. dprintf(" fork events: %10ld\n", total_fork);
  1474. dprintf(" lost events: %10ld\n", total_lost);
  1475. dprintf(" unknown events: %10ld\n", total_unknown);
  1476. if (dump_trace)
  1477. return 0;
  1478. if (verbose >= 3)
  1479. threads__fprintf(stdout);
  1480. if (verbose >= 2)
  1481. dsos__fprintf(stdout);
  1482. collapse__resort();
  1483. output__resort(total);
  1484. output__fprintf(stdout, total);
  1485. if (show_threads)
  1486. perf_read_values_destroy(&show_threads_values);
  1487. return rc;
  1488. }
  1489. static int
  1490. parse_callchain_opt(const struct option *opt __used, const char *arg,
  1491. int unset __used)
  1492. {
  1493. char *tok;
  1494. char *endptr;
  1495. callchain = 1;
  1496. if (!arg)
  1497. return 0;
  1498. tok = strtok((char *)arg, ",");
  1499. if (!tok)
  1500. return -1;
  1501. /* get the output mode */
  1502. if (!strncmp(tok, "graph", strlen(arg)))
  1503. callchain_param.mode = CHAIN_GRAPH_ABS;
  1504. else if (!strncmp(tok, "flat", strlen(arg)))
  1505. callchain_param.mode = CHAIN_FLAT;
  1506. else if (!strncmp(tok, "fractal", strlen(arg)))
  1507. callchain_param.mode = CHAIN_GRAPH_REL;
  1508. else if (!strncmp(tok, "none", strlen(arg))) {
  1509. callchain_param.mode = CHAIN_NONE;
  1510. callchain = 0;
  1511. return 0;
  1512. }
  1513. else
  1514. return -1;
  1515. /* get the min percentage */
  1516. tok = strtok(NULL, ",");
  1517. if (!tok)
  1518. goto setup;
  1519. callchain_param.min_percent = strtod(tok, &endptr);
  1520. if (tok == endptr)
  1521. return -1;
  1522. setup:
  1523. if (register_callchain_param(&callchain_param) < 0) {
  1524. fprintf(stderr, "Can't register callchain params\n");
  1525. return -1;
  1526. }
  1527. return 0;
  1528. }
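/*
 * Usage sketch for the -g/--call-graph parser above (command lines are
 * illustrative): "perf report -g" keeps the fractal,0.5 default,
 * "perf report -g flat" selects the flat printout, and
 * "perf report -g graph,2.0" selects absolute percentages while hiding
 * call-chain branches below 2% of the samples.
 */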
  1529. static const char * const report_usage[] = {
  1530. "perf report [<options>] <command>",
  1531. NULL
  1532. };
  1533. static const struct option options[] = {
  1534. OPT_STRING('i', "input", &input_name, "file",
  1535. "input file name"),
  1536. OPT_BOOLEAN('v', "verbose", &verbose,
  1537. "be more verbose (show symbol address, etc)"),
  1538. OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
  1539. "dump raw trace in ASCII"),
  1540. OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
  1541. OPT_BOOLEAN('m', "modules", &modules,
  1542. "load module symbols - WARNING: use only with -k and LIVE kernel"),
  1543. OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
  1544. "Show a column with the number of samples"),
  1545. OPT_BOOLEAN('T', "threads", &show_threads,
  1546. "Show per-thread event counters"),
  1547. OPT_STRING(0, "pretty", &pretty_printing_style, "key",
  1548. "pretty printing style key: normal raw"),
  1549. OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
  1550. "sort by key(s): pid, comm, dso, symbol, parent"),
  1551. OPT_BOOLEAN('P', "full-paths", &full_paths,
  1552. "Don't shorten the pathnames taking into account the cwd"),
  1553. OPT_STRING('p', "parent", &parent_pattern, "regex",
  1554. "regex filter to identify parent, see: '--sort parent'"),
  1555. OPT_BOOLEAN('x', "exclude-other", &exclude_other,
  1556. "Only display entries with parent-match"),
  1557. OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
  1558. "Display callchains using output_type and min percent threshold. "
  1559. "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
  1560. OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
  1561. "only consider symbols in these dsos"),
  1562. OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
  1563. "only consider symbols in these comms"),
  1564. OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
  1565. "only consider these symbols"),
  1566. OPT_STRING('w', "column-widths", &col_width_list_str,
  1567. "width[,width...]",
  1568. "don't try to adjust column width, use these fixed values"),
  1569. OPT_STRING('t', "field-separator", &field_sep, "separator",
  1570. "separator for columns, no spaces will be added between "
  1571. "columns '.' is reserved."),
  1572. OPT_END()
  1573. };
  1574. static void setup_sorting(void)
  1575. {
  1576. char *tmp, *tok, *str = strdup(sort_order);
  1577. for (tok = strtok_r(str, ", ", &tmp);
  1578. tok; tok = strtok_r(NULL, ", ", &tmp)) {
  1579. if (sort_dimension__add(tok) < 0) {
  1580. error("Unknown --sort key: `%s'", tok);
  1581. usage_with_options(report_usage, options);
  1582. }
  1583. }
  1584. free(str);
  1585. }
  1586. static void setup_list(struct strlist **list, const char *list_str,
  1587. struct sort_entry *se, const char *list_name,
  1588. FILE *fp)
  1589. {
  1590. if (list_str) {
  1591. *list = strlist__new(true, list_str);
  1592. if (!*list) {
  1593. fprintf(stderr, "problems parsing %s list\n",
  1594. list_name);
  1595. exit(129);
  1596. }
  1597. if (strlist__nr_entries(*list) == 1) {
  1598. fprintf(fp, "# %s: %s\n", list_name,
  1599. strlist__entry(*list, 0)->s);
  1600. se->elide = true;
  1601. }
  1602. }
  1603. }
  1604. int cmd_report(int argc, const char **argv, const char *prefix __used)
  1605. {
  1606. symbol__init();
  1607. page_size = getpagesize();
  1608. argc = parse_options(argc, argv, options, report_usage, 0);
  1609. setup_sorting();
  1610. if (parent_pattern != default_parent_pattern) {
  1611. sort_dimension__add("parent");
  1612. sort_parent.elide = 1;
  1613. } else
  1614. exclude_other = 0;
  1615. /*
  1616. * Any (unrecognized) arguments left?
  1617. */
  1618. if (argc)
  1619. usage_with_options(report_usage, options);
  1620. setup_pager();
  1621. setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
  1622. setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
  1623. setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
  1624. if (field_sep && *field_sep == '.') {
  1625. fputs("'.' is the only non valid --field-separator argument\n",
  1626. stderr);
  1627. exit(129);
  1628. }
  1629. return __cmd_report();
  1630. }