hist.c

#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

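/*
 * Filter bits kept in hist_entry->filtered; an entry stays hidden from
 * output while any of these bits is set.
 */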
enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
};

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

static void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

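/*
 * Grow the per-column widths so that this entry's symbol, comm and DSO
 * names fit when the histogram is printed.
 */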
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
        else {
                const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

                if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
                    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
                    !symbol_conf.dso_list)
                        hists__set_col_len(hists, HISTC_DSO,
                                           unresolved_col_width);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }
}

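/*
 * Attribute the sample period to the kernel/user/guest bucket matching
 * the sample's cpumode.
 */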
static void hist_entry__add_cpumode_period(struct hist_entry *self,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                self->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                self->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                self->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                self->period_guest_us += period;
                break;
        default:
                break;
        }
}

/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ?
                                sizeof(struct callchain_root) : 0;
        struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

        if (self != NULL) {
                *self = *template;
                self->nr_events = 1;
                if (self->ms.map)
                        self->ms.map->referenced = true;
                if (symbol_conf.use_callchain)
                        callchain_init(self->callchain);
        }

        return self;
}

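/*
 * Account an unfiltered entry in the histogram totals and update the
 * column widths it needs.
 */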
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

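/*
 * Add a sample to the histogram: look the entry up in the current input
 * tree (hists->entries_in) under hists->lock and aggregate the period
 * into an existing entry, or allocate and insert a new one.
 */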
struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .period = period,
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
        };
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__cmp(&entry, he);

                if (!cmp) {
                        he->period += period;
                        ++he->nr_events;
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(&entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->period += he->period;
                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&hists->callchain_cursor);
                                callchain_merge(&hists->callchain_cursor, iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

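/*
 * Swap to the other input tree under hists->lock and return the one
 * that was in use, so new samples can keep flowing in while the
 * returned tree is being collapsed.
 */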
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);
        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];
        pthread_mutex_unlock(&hists->lock);

        return root;
}

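/*
 * Merge entries that compare equal under the collapse comparators from
 * the input tree into hists->entries_collapsed, recomputing totals.
 */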
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);
        hists->stats.total_period = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n))
                        hists__inc_nr_entries(hists, n);
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->period > iter->period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

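/*
 * Rebuild hists->entries sorted by period (descending) from either the
 * collapsed tree or the input tree, handing each entry's callchain to
 * callchain_param.sort together with the minimum-hits threshold.
 */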
static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}

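/*
 * Helpers for printing callchains in graph mode: the left margin lines
 * entries up under their parent column, and the depth mask tracks which
 * '|' connectors are still open at each level.
 */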
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
        int i;
        int ret = fprintf(fp, " ");

        for (i = 0; i < left_margin; i++)
                ret += fprintf(fp, " ");

        return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
                                          int left_margin)
{
        int i;
        size_t ret = callchain__fprintf_left_margin(fp, left_margin);

        for (i = 0; i < depth; i++)
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "| ");
                else
                        ret += fprintf(fp, " ");

        ret += fprintf(fp, "\n");

        return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
                                     int depth, int depth_mask, int period,
                                     u64 total_samples, u64 hits,
                                     int left_margin)
{
        int i;
        size_t ret = 0;

        ret += callchain__fprintf_left_margin(fp, left_margin);
        for (i = 0; i < depth; i++) {
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "|");
                else
                        ret += fprintf(fp, " ");
                if (!period && i == depth - 1) {
                        double percent;

                        percent = hits * 100.0 / total_samples;
                        ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
                } else
                        ret += fprintf(fp, "%s", " ");
        }
        if (chain->ms.sym)
                ret += fprintf(fp, "%s\n", chain->ms.sym->name);
        else
                ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

        return ret;
}

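/*
 * Pseudo-symbol used to print a "[...]" line summarizing the children
 * that were not printed (remaining hits) in relative graph mode.
 */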
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
        rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
        if (!rem_sq_bracket) {
                fprintf(stderr, "Not enough memory to display remaining hits\n");
                return;
        }

        strcpy(rem_sq_bracket->name, "[...]");
        rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                                         u64 total_samples, int depth,
                                         int depth_mask, int left_margin)
{
        struct rb_node *node, *next;
        struct callchain_node *child;
        struct callchain_list *chain;
        int new_depth_mask = depth_mask;
        u64 new_total;
        u64 remaining;
        size_t ret = 0;
        int i;
        uint entries_printed = 0;

        if (callchain_param.mode == CHAIN_GRAPH_REL)
                new_total = self->children_hit;
        else
                new_total = total_samples;

        remaining = new_total;

        node = rb_first(&self->rb_root);
        while (node) {
                u64 cumul;

                child = rb_entry(node, struct callchain_node, rb_node);
                cumul = callchain_cumul_hits(child);
                remaining -= cumul;

                /*
                 * The depth mask manages the output of pipes that show
                 * the depth. We don't want to keep the pipes of the current
                 * level for the last child of this depth.
                 * Except if we have remaining filtered hits. They will
                 * supersede the last child
                 */
                next = rb_next(node);
                if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
                        new_depth_mask &= ~(1 << (depth - 1));

                /*
                 * But we keep the older depth mask for the line separator
                 * to keep the level link until we reach the last child
                 */
                ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
                                                   left_margin);
                i = 0;
                list_for_each_entry(chain, &child->val, list) {
                        ret += ipchain__fprintf_graph(fp, chain, depth,
                                                      new_depth_mask, i++,
                                                      new_total,
                                                      cumul,
                                                      left_margin);
                }
                ret += __callchain__fprintf_graph(fp, child, new_total,
                                                  depth + 1,
                                                  new_depth_mask | (1 << depth),
                                                  left_margin);
                node = next;
                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        if (callchain_param.mode == CHAIN_GRAPH_REL &&
            remaining && remaining != new_total) {

                if (!rem_sq_bracket)
                        return ret;

                new_depth_mask &= ~(1 << (depth - 1));

                ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
                                              new_depth_mask, 0, new_total,
                                              remaining, left_margin);
        }

        return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                                       u64 total_samples, int left_margin)
{
        struct callchain_list *chain;
        bool printed = false;
        int i = 0;
        int ret = 0;
        u32 entries_printed = 0;

        list_for_each_entry(chain, &self->val, list) {
                if (!i++ && sort__first_dimension == SORT_SYM)
                        continue;

                if (!printed) {
                        ret += callchain__fprintf_left_margin(fp, left_margin);
                        ret += fprintf(fp, "|\n");
                        ret += callchain__fprintf_left_margin(fp, left_margin);
                        ret += fprintf(fp, "---");

                        left_margin += 3;
                        printed = true;
                } else
                        ret += callchain__fprintf_left_margin(fp, left_margin);

                if (chain->ms.sym)
                        ret += fprintf(fp, " %s\n", chain->ms.sym->name);
                else
                        ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

        return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
                                      u64 total_samples)
{
        struct callchain_list *chain;
        size_t ret = 0;

        if (!self)
                return 0;

        ret += callchain__fprintf_flat(fp, self->parent, total_samples);

        list_for_each_entry(chain, &self->val, list) {
                if (chain->ip >= PERF_CONTEXT_MAX)
                        continue;
                if (chain->ms.sym)
                        ret += fprintf(fp, " %s\n", chain->ms.sym->name);
                else
                        ret += fprintf(fp, " %p\n",
                                       (void *)(long)chain->ip);
        }

        return ret;
}

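/*
 * Print the callchains attached to one entry, dispatching on the
 * configured callchain_param.mode (flat or graph output).
 */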
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
                                            u64 total_samples, int left_margin)
{
        struct rb_node *rb_node;
        struct callchain_node *chain;
        size_t ret = 0;
        u32 entries_printed = 0;

        rb_node = rb_first(&self->sorted_chain);
        while (rb_node) {
                double percent;

                chain = rb_entry(rb_node, struct callchain_node, rb_node);
                percent = chain->hit * 100.0 / total_samples;
                switch (callchain_param.mode) {
                case CHAIN_FLAT:
                        ret += percent_color_fprintf(fp, " %6.2f%%\n",
                                                     percent);
                        ret += callchain__fprintf_flat(fp, chain, total_samples);
                        break;
                case CHAIN_GRAPH_ABS: /* fall through */
                case CHAIN_GRAPH_REL:
                        ret += callchain__fprintf_graph(fp, chain, total_samples,
                                                        left_margin);
                case CHAIN_NONE:
                default:
                        break;
                }
                ret += fprintf(fp, "\n");
                if (++entries_printed == callchain_param.print_limit)
                        break;
                rb_node = rb_next(rb_node);
        }

        return ret;
}

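/*
 * Format a single histogram line into the supplied buffer: the overhead
 * percentage (or the raw period when there is no total), optional sample
 * and period counts, the delta/displacement columns when comparing
 * against a pair of hists, and finally each enabled sort column.
 */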
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
                         struct hists *hists, struct hists *pair_hists,
                         bool show_displacement, long displacement,
                         bool color, u64 session_total)
{
        struct sort_entry *se;
        u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
        u64 nr_events;
        const char *sep = symbol_conf.field_sep;
        int ret;

        if (symbol_conf.exclude_other && !self->parent)
                return 0;

        if (pair_hists) {
                period = self->pair ? self->pair->period : 0;
                nr_events = self->pair ? self->pair->nr_events : 0;
                total = pair_hists->stats.total_period;
                period_sys = self->pair ? self->pair->period_sys : 0;
                period_us = self->pair ? self->pair->period_us : 0;
                period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
                period_guest_us = self->pair ? self->pair->period_guest_us : 0;
        } else {
                period = self->period;
                nr_events = self->nr_events;
                total = session_total;
                period_sys = self->period_sys;
                period_us = self->period_us;
                period_guest_sys = self->period_guest_sys;
                period_guest_us = self->period_guest_us;
        }

        if (total) {
                if (color)
                        ret = percent_color_snprintf(s, size,
                                                     sep ? "%.2f" : " %6.2f%%",
                                                     (period * 100.0) / total);
                else
                        ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
                                       (period * 100.0) / total);
                if (symbol_conf.show_cpu_utilization) {
                        ret += percent_color_snprintf(s + ret, size - ret,
                                                      sep ? "%.2f" : " %6.2f%%",
                                                      (period_sys * 100.0) / total);
                        ret += percent_color_snprintf(s + ret, size - ret,
                                                      sep ? "%.2f" : " %6.2f%%",
                                                      (period_us * 100.0) / total);
                        if (perf_guest) {
                                ret += percent_color_snprintf(s + ret,
                                                              size - ret,
                                                              sep ? "%.2f" : " %6.2f%%",
                                                              (period_guest_sys * 100.0) /
                                                              total);
                                ret += percent_color_snprintf(s + ret,
                                                              size - ret,
                                                              sep ? "%.2f" : " %6.2f%%",
                                                              (period_guest_us * 100.0) /
                                                              total);
                        }
                }
        } else
                ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

        if (symbol_conf.show_nr_samples) {
                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
                else
                        ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
        }

        if (symbol_conf.show_total_period) {
                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
                else
                        ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
        }

        if (pair_hists) {
                char bf[32];
                double old_percent = 0, new_percent = 0, diff;

                if (total > 0)
                        old_percent = (period * 100.0) / total;
                if (session_total > 0)
                        new_percent = (self->period * 100.0) / session_total;

                diff = new_percent - old_percent;

                if (fabs(diff) >= 0.01)
                        snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
                else
                        snprintf(bf, sizeof(bf), " ");

                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
                else
                        ret += snprintf(s + ret, size - ret, "%11.11s", bf);

                if (show_displacement) {
                        if (displacement)
                                snprintf(bf, sizeof(bf), "%+4ld", displacement);
                        else
                                snprintf(bf, sizeof(bf), " ");

                        if (sep)
                                ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
                        else
                                ret += snprintf(s + ret, size - ret, "%6.6s", bf);
                }
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;

                ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
                ret += se->se_snprintf(self, s + ret, size - ret,
                                       hists__col_len(hists, se->se_width_idx));
        }

        return ret;
}

int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
                        struct hists *pair_hists, bool show_displacement,
                        long displacement, FILE *fp, u64 session_total)
{
        char bf[512];

        if (size == 0 || size > sizeof(bf))
                size = sizeof(bf);

        hist_entry__snprintf(he, bf, size, hists, pair_hists,
                             show_displacement, displacement,
                             true, session_total);

        return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
                                            struct hists *hists, FILE *fp,
                                            u64 session_total)
{
        int left_margin = 0;

        if (sort__first_dimension == SORT_COMM) {
                struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
                                                         typeof(*se), list);
                left_margin = hists__col_len(hists, se->se_width_idx);
                left_margin -= thread__comm_len(self->thread);
        }

        return hist_entry_callchain__fprintf(fp, self, session_total,
                                             left_margin);
}

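/*
 * Print the whole histogram: an optional header describing the enabled
 * columns, a dotted separator, then one line per unfiltered entry (plus
 * its callchain when callchains are in use), honoring max_rows/max_cols.
 */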
size_t hists__fprintf(struct hists *hists, struct hists *pair,
                      bool show_displacement, bool show_header, int max_rows,
                      int max_cols, FILE *fp)
{
        struct sort_entry *se;
        struct rb_node *nd;
        size_t ret = 0;
        unsigned long position = 1;
        long displacement = 0;
        unsigned int width;
        const char *sep = symbol_conf.field_sep;
        const char *col_width = symbol_conf.col_width_list_str;
        int nr_rows = 0;

        init_rem_hits();

        if (!show_header)
                goto print_entries;

        fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

        if (symbol_conf.show_nr_samples) {
                if (sep)
                        fprintf(fp, "%cSamples", *sep);
                else
                        fputs(" Samples ", fp);
        }

        if (symbol_conf.show_total_period) {
                if (sep)
                        ret += fprintf(fp, "%cPeriod", *sep);
                else
                        ret += fprintf(fp, " Period ");
        }

        if (symbol_conf.show_cpu_utilization) {
                if (sep) {
                        ret += fprintf(fp, "%csys", *sep);
                        ret += fprintf(fp, "%cus", *sep);
                        if (perf_guest) {
                                ret += fprintf(fp, "%cguest sys", *sep);
                                ret += fprintf(fp, "%cguest us", *sep);
                        }
                } else {
                        ret += fprintf(fp, " sys ");
                        ret += fprintf(fp, " us ");
                        if (perf_guest) {
                                ret += fprintf(fp, " guest sys ");
                                ret += fprintf(fp, " guest us ");
                        }
                }
        }

        if (pair) {
                if (sep)
                        ret += fprintf(fp, "%cDelta", *sep);
                else
                        ret += fprintf(fp, " Delta ");

                if (show_displacement) {
                        if (sep)
                                ret += fprintf(fp, "%cDisplacement", *sep);
                        else
                                ret += fprintf(fp, " Displ");
                }
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;
                if (sep) {
                        fprintf(fp, "%c%s", *sep, se->se_header);
                        continue;
                }
                width = strlen(se->se_header);
                if (symbol_conf.col_width_list_str) {
                        if (col_width) {
                                hists__set_col_len(hists, se->se_width_idx,
                                                   atoi(col_width));
                                col_width = strchr(col_width, ',');
                                if (col_width)
                                        ++col_width;
                        }
                }
                if (!hists__new_col_len(hists, se->se_width_idx, width))
                        width = hists__col_len(hists, se->se_width_idx);
                fprintf(fp, " %*s", width, se->se_header);
        }

        fprintf(fp, "\n");
        if (max_rows && ++nr_rows >= max_rows)
                goto out;

        if (sep)
                goto print_entries;

        fprintf(fp, "# ........");
        if (symbol_conf.show_nr_samples)
                fprintf(fp, " ..........");
        if (symbol_conf.show_total_period)
                fprintf(fp, " ............");
        if (pair) {
                fprintf(fp, " ..........");
                if (show_displacement)
                        fprintf(fp, " .....");
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                unsigned int i;

                if (se->elide)
                        continue;

                fprintf(fp, " ");
                width = hists__col_len(hists, se->se_width_idx);
                if (width == 0)
                        width = strlen(se->se_header);
                for (i = 0; i < width; i++)
                        fprintf(fp, ".");
        }

        fprintf(fp, "\n");
        if (max_rows && ++nr_rows >= max_rows)
                goto out;

        fprintf(fp, "#\n");
        if (max_rows && ++nr_rows >= max_rows)
                goto out;

print_entries:
        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (h->filtered)
                        continue;

                if (show_displacement) {
                        if (h->pair != NULL)
                                displacement = ((long)h->pair->position -
                                                (long)position);
                        else
                                displacement = 0;
                        ++position;
                }
                ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
                                           displacement, fp, hists->stats.total_period);

                if (symbol_conf.use_callchain)
                        ret += hist_entry__fprintf_callchain(h, hists, fp,
                                                             hists->stats.total_period);
                if (max_rows && ++nr_rows >= max_rows)
                        goto out;

                if (h->ms.map == NULL && verbose > 1) {
                        __map_groups__fprintf_maps(&h->thread->mg,
                                                   MAP__FUNCTION, verbose, fp);
                        fprintf(fp, "%.10s end\n", graph_dotted_line);
                }
        }
out:
        free(rem_sq_bracket);
        return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
        struct sort_entry *se;
        int ret = 9; /* total % */

        if (symbol_conf.show_cpu_utilization) {
                ret += 7; /* count_sys % */
                ret += 6; /* count_us % */
                if (perf_guest) {
                        ret += 13; /* count_guest_sys % */
                        ret += 12; /* count_guest_us % */
                }
        }

        if (symbol_conf.show_nr_samples)
                ret += 11;

        if (symbol_conf.show_total_period)
                ret += 13;

        list_for_each_entry(se, &hist_entry__sort_list, list)
                if (!se->elide)
                        ret += 2 + hists__col_len(hists, se->se_width_idx);

        if (verbose) /* Addr + origin */
                ret += 3 + BITS_PER_LONG / 4;

        return ret;
}

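/*
 * Clear one filter bit on an entry; if no other filter still applies,
 * add the entry back into the visible counts, totals and column widths.
 */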
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

        hists__calc_col_len(hists, h);
}

void hists__filter_by_dso(struct hists *hists, const struct dso *dso)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
                        h->filtered |= (1 << HIST_FILTER__DSO);
                        continue;
                }

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

void hists__filter_by_thread(struct hists *hists, const struct thread *thread)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (thread != NULL && h->thread != thread) {
                        h->filtered |= (1 << HIST_FILTER__THREAD);
                        continue;
                }

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        ++hists->stats.nr_events[0];
        ++hists->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
        int i;
        size_t ret = 0;

        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
                const char *name;

                if (hists->stats.nr_events[i] == 0)
                        continue;

                name = perf_event__name(i);
                if (!strcmp(name, "UNKNOWN"))
                        continue;

                ret += fprintf(fp, "%16s events: %10d\n", name,
                               hists->stats.nr_events[i]);
        }

        return ret;
}

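/*
 * Zero a struct hists and initialize its rbtree roots and the lock that
 * protects the entries_in input trees.
 */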
void hists__init(struct hists *hists)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
}