hist.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147
  1. #include "annotate.h"
  2. #include "util.h"
  3. #include "build-id.h"
  4. #include "hist.h"
  5. #include "session.h"
  6. #include "sort.h"
  7. #include <math.h>
/* Bit positions used in hist_entry->filtered to record why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};
/*
 * Global callchain rendering defaults: graph mode with percentages relative
 * to the parent node, chains below 0.5% pruned, sorted in callee order.
 */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};
/* Return the current display width of column @col. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
/* Unconditionally set the display width of column @col to @len. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
  26. bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
  27. {
  28. if (len > hists__col_len(hists, col)) {
  29. hists__set_col_len(hists, col, len);
  30. return true;
  31. }
  32. return false;
  33. }
  34. static void hists__reset_col_len(struct hists *hists)
  35. {
  36. enum hist_column col;
  37. for (col = 0; col < HISTC_NR_COLS; ++col)
  38. hists__set_col_len(hists, col, 0);
  39. }
  40. static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
  41. {
  42. u16 len;
  43. if (h->ms.sym)
  44. hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
  45. else {
  46. const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
  47. if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
  48. !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
  49. !symbol_conf.dso_list)
  50. hists__set_col_len(hists, HISTC_DSO,
  51. unresolved_col_width);
  52. }
  53. len = thread__comm_len(h->thread);
  54. if (hists__new_col_len(hists, HISTC_COMM, len))
  55. hists__set_col_len(hists, HISTC_THREAD, len + 6);
  56. if (h->ms.map) {
  57. len = dso__name_len(h->ms.map->dso);
  58. hists__new_col_len(hists, HISTC_DSO, len);
  59. }
  60. }
  61. static void hist_entry__add_cpumode_period(struct hist_entry *self,
  62. unsigned int cpumode, u64 period)
  63. {
  64. switch (cpumode) {
  65. case PERF_RECORD_MISC_KERNEL:
  66. self->period_sys += period;
  67. break;
  68. case PERF_RECORD_MISC_USER:
  69. self->period_us += period;
  70. break;
  71. case PERF_RECORD_MISC_GUEST_KERNEL:
  72. self->period_guest_sys += period;
  73. break;
  74. case PERF_RECORD_MISC_GUEST_USER:
  75. self->period_guest_us += period;
  76. break;
  77. default:
  78. break;
  79. }
  80. }
/* Exponentially decay an entry: keep 7/8 of its period and event count. */
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}
  86. static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
  87. {
  88. hists->stats.total_period -= he->period;
  89. hist_entry__decay(he);
  90. hists->stats.total_period += he->period;
  91. return he->period == 0;
  92. }
  93. void hists__decay_entries(struct hists *hists)
  94. {
  95. struct rb_node *next = rb_first(&hists->entries);
  96. struct hist_entry *n;
  97. while (next) {
  98. n = rb_entry(next, struct hist_entry, rb_node);
  99. next = rb_next(&n->rb_node);
  100. if (hists__decay_entry(hists, n)) {
  101. rb_erase(&n->rb_node, &hists->entries);
  102. if (sort__need_collapse)
  103. rb_erase(&n->rb_node_in, &hists->entries_collapsed);
  104. hist_entry__free(n);
  105. --hists->nr_entries;
  106. }
  107. }
  108. }
  109. /*
  110. * histogram, sorted on item, collects periods
  111. */
  112. static struct hist_entry *hist_entry__new(struct hist_entry *template)
  113. {
  114. size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
  115. struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
  116. if (self != NULL) {
  117. *self = *template;
  118. self->nr_events = 1;
  119. if (self->ms.map)
  120. self->ms.map->referenced = true;
  121. if (symbol_conf.use_callchain)
  122. callchain_init(self->callchain);
  123. }
  124. return self;
  125. }
  126. static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
  127. {
  128. if (!h->filtered) {
  129. hists__calc_col_len(hists, h);
  130. ++hists->nr_entries;
  131. hists->stats.total_period += h->period;
  132. }
  133. }
  134. static u8 symbol__parent_filter(const struct symbol *parent)
  135. {
  136. if (symbol_conf.exclude_other && parent == NULL)
  137. return 1 << HIST_FILTER__PARENT;
  138. return 0;
  139. }
/*
 * Add one sample to the histogram: look up an entry matching the resolved
 * location @al in the current input tree and aggregate @period into it, or
 * allocate and insert a fresh entry.  Returns the entry, or NULL when the
 * allocation of a new entry fails.
 */
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	/* search key built on the stack from the sample's location */
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int cmp;

	/* entries_in can be rotated by the collapse path, so serialize */
	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			/* same sort key: aggregate into the existing entry */
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
  188. int64_t
  189. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  190. {
  191. struct sort_entry *se;
  192. int64_t cmp = 0;
  193. list_for_each_entry(se, &hist_entry__sort_list, list) {
  194. cmp = se->se_cmp(left, right);
  195. if (cmp)
  196. break;
  197. }
  198. return cmp;
  199. }
  200. int64_t
  201. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  202. {
  203. struct sort_entry *se;
  204. int64_t cmp = 0;
  205. list_for_each_entry(se, &hist_entry__sort_list, list) {
  206. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  207. f = se->se_collapse ?: se->se_cmp;
  208. cmp = f(left, right);
  209. if (cmp)
  210. break;
  211. }
  212. return cmp;
  213. }
/* Release an entry allocated by hist_entry__new() (callchain included). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
  218. /*
  219. * collapse the histogram
  220. */
  221. static bool hists__collapse_insert_entry(struct hists *hists,
  222. struct rb_root *root,
  223. struct hist_entry *he)
  224. {
  225. struct rb_node **p = &root->rb_node;
  226. struct rb_node *parent = NULL;
  227. struct hist_entry *iter;
  228. int64_t cmp;
  229. while (*p != NULL) {
  230. parent = *p;
  231. iter = rb_entry(parent, struct hist_entry, rb_node_in);
  232. cmp = hist_entry__collapse(iter, he);
  233. if (!cmp) {
  234. iter->period += he->period;
  235. iter->nr_events += he->nr_events;
  236. if (symbol_conf.use_callchain) {
  237. callchain_cursor_reset(&hists->callchain_cursor);
  238. callchain_merge(&hists->callchain_cursor, iter->callchain,
  239. he->callchain);
  240. }
  241. hist_entry__free(he);
  242. return false;
  243. }
  244. if (cmp < 0)
  245. p = &(*p)->rb_left;
  246. else
  247. p = &(*p)->rb_right;
  248. }
  249. rb_link_node(&he->rb_node_in, parent, p);
  250. rb_insert_color(&he->rb_node_in, root);
  251. return true;
  252. }
  253. static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
  254. {
  255. struct rb_root *root;
  256. pthread_mutex_lock(&hists->lock);
  257. root = hists->entries_in;
  258. if (++hists->entries_in > &hists->entries_in_array[1])
  259. hists->entries_in = &hists->entries_in_array[0];
  260. pthread_mutex_unlock(&hists->lock);
  261. return root;
  262. }
  263. static void __hists__collapse_resort(struct hists *hists, bool threaded)
  264. {
  265. struct rb_root *root;
  266. struct rb_node *next;
  267. struct hist_entry *n;
  268. if (!sort__need_collapse && !threaded)
  269. return;
  270. root = hists__get_rotate_entries_in(hists);
  271. next = rb_first(root);
  272. hists->stats.total_period = 0;
  273. while (next) {
  274. n = rb_entry(next, struct hist_entry, rb_node_in);
  275. next = rb_next(&n->rb_node_in);
  276. rb_erase(&n->rb_node_in, root);
  277. if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n))
  278. hists__inc_nr_entries(hists, n);
  279. }
  280. }
  281. void hists__collapse_resort(struct hists *hists)
  282. {
  283. return __hists__collapse_resort(hists, false);
  284. }
  285. void hists__collapse_resort_threaded(struct hists *hists)
  286. {
  287. return __hists__collapse_resort(hists, true);
  288. }
  289. /*
  290. * reverse the map, sort on period.
  291. */
  292. static void __hists__insert_output_entry(struct rb_root *entries,
  293. struct hist_entry *he,
  294. u64 min_callchain_hits)
  295. {
  296. struct rb_node **p = &entries->rb_node;
  297. struct rb_node *parent = NULL;
  298. struct hist_entry *iter;
  299. if (symbol_conf.use_callchain)
  300. callchain_param.sort(&he->sorted_chain, he->callchain,
  301. min_callchain_hits, &callchain_param);
  302. while (*p != NULL) {
  303. parent = *p;
  304. iter = rb_entry(parent, struct hist_entry, rb_node);
  305. if (he->period > iter->period)
  306. p = &(*p)->rb_left;
  307. else
  308. p = &(*p)->rb_right;
  309. }
  310. rb_link_node(&he->rb_node, parent, p);
  311. rb_insert_color(&he->rb_node, entries);
  312. }
  313. static void __hists__output_resort(struct hists *hists, bool threaded)
  314. {
  315. struct rb_root *root;
  316. struct rb_node *next;
  317. struct hist_entry *n;
  318. u64 min_callchain_hits;
  319. min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
  320. if (sort__need_collapse || threaded)
  321. root = &hists->entries_collapsed;
  322. else
  323. root = hists->entries_in;
  324. next = rb_first(root);
  325. hists->entries = RB_ROOT;
  326. hists->nr_entries = 0;
  327. hists__reset_col_len(hists);
  328. while (next) {
  329. n = rb_entry(next, struct hist_entry, rb_node_in);
  330. next = rb_next(&n->rb_node_in);
  331. __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
  332. hists__inc_nr_entries(hists, n);
  333. }
  334. }
  335. void hists__output_resort(struct hists *hists)
  336. {
  337. return __hists__output_resort(hists, false);
  338. }
  339. void hists__output_resort_threaded(struct hists *hists)
  340. {
  341. return __hists__output_resort(hists, true);
  342. }
  343. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  344. {
  345. int i;
  346. int ret = fprintf(fp, " ");
  347. for (i = 0; i < left_margin; i++)
  348. ret += fprintf(fp, " ");
  349. return ret;
  350. }
  351. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  352. int left_margin)
  353. {
  354. int i;
  355. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  356. for (i = 0; i < depth; i++)
  357. if (depth_mask & (1 << i))
  358. ret += fprintf(fp, "| ");
  359. else
  360. ret += fprintf(fp, " ");
  361. ret += fprintf(fp, "\n");
  362. return ret;
  363. }
/*
 * Print one callchain graph row: the depth connectors, the percentage
 * marker on the first row of a branch (@period == 0), and the symbol name
 * (or raw address when unresolved).  Returns the number of chars written.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			/* colorized "--NN.NN%-- " branch marker */
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
/* Fake "[...]" symbol used to render remaining (pruned) callchain hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	/*
	 * "[...]" is 5 chars + NUL = 6 bytes copied into the name storage
	 * right behind the struct — presumably a trailing flexible name
	 * member; confirm against the struct symbol definition.
	 */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
/*
 * Recursively print the callchain graph below @self.  @depth_mask tracks
 * which ancestor levels still need a "|" connector.  In CHAIN_GRAPH_REL
 * mode percentages are relative to the parent's children_hit, and any
 * hits pruned by the min-percent filter are printed as a trailing "[...]"
 * row.  Returns the number of chars written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i == 0 marks the first row, which gets the % */
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {
		/* print the filtered-out remainder as a "[...]" entry */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
/*
 * Print the top of a callchain graph: the entries of @self itself (the
 * first one is skipped when the sort key is the symbol, since the entry
 * line already shows it), then recurse into the children.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			/* first printed row opens the graph with "---" */
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
  499. static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
  500. u64 total_samples)
  501. {
  502. struct callchain_list *chain;
  503. size_t ret = 0;
  504. if (!self)
  505. return 0;
  506. ret += callchain__fprintf_flat(fp, self->parent, total_samples);
  507. list_for_each_entry(chain, &self->val, list) {
  508. if (chain->ip >= PERF_CONTEXT_MAX)
  509. continue;
  510. if (chain->ms.sym)
  511. ret += fprintf(fp, " %s\n", chain->ms.sym->name);
  512. else
  513. ret += fprintf(fp, " %p\n",
  514. (void *)(long)chain->ip);
  515. }
  516. return ret;
  517. }
  518. static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
  519. u64 total_samples, int left_margin)
  520. {
  521. struct rb_node *rb_node;
  522. struct callchain_node *chain;
  523. size_t ret = 0;
  524. u32 entries_printed = 0;
  525. rb_node = rb_first(&self->sorted_chain);
  526. while (rb_node) {
  527. double percent;
  528. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  529. percent = chain->hit * 100.0 / total_samples;
  530. switch (callchain_param.mode) {
  531. case CHAIN_FLAT:
  532. ret += percent_color_fprintf(fp, " %6.2f%%\n",
  533. percent);
  534. ret += callchain__fprintf_flat(fp, chain, total_samples);
  535. break;
  536. case CHAIN_GRAPH_ABS: /* Falldown */
  537. case CHAIN_GRAPH_REL:
  538. ret += callchain__fprintf_graph(fp, chain, total_samples,
  539. left_margin);
  540. case CHAIN_NONE:
  541. default:
  542. break;
  543. }
  544. ret += fprintf(fp, "\n");
  545. if (++entries_printed == callchain_param.print_limit)
  546. break;
  547. rb_node = rb_next(rb_node);
  548. }
  549. return ret;
  550. }
  551. void hists__output_recalc_col_len(struct hists *hists, int max_rows)
  552. {
  553. struct rb_node *next = rb_first(&hists->entries);
  554. struct hist_entry *n;
  555. int row = 0;
  556. hists__reset_col_len(hists);
  557. while (next && row++ < max_rows) {
  558. n = rb_entry(next, struct hist_entry, rb_node);
  559. hists__calc_col_len(hists, n);
  560. next = rb_next(&n->rb_node);
  561. }
  562. }
/*
 * Format one histogram entry into @s: the overhead percentage (or raw
 * period when @session_total is 0), then optional sample/period columns,
 * the baseline delta and displacement when diffing against @pair_hists,
 * and finally each non-elided sort key column.  Returns the number of
 * chars written.  When @pair_hists is set, the numbers shown come from
 * the paired entry (the baseline) rather than @self.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		/* diff mode: report the baseline (paired) numbers */
		period = self->pair ? self->pair->period : 0;
		nr_events = self->pair ? self->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		nr_events = self->nr_events;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : " %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : " %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : " %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : " %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		/* no total to divide by: print the raw period instead */
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* suppress deltas that would round to 0.00% */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}
/*
 * Format one entry into a local buffer (colorized) and print it followed
 * by a newline.  @size of 0, or anything larger than the local buffer,
 * means "use the whole buffer".  Returns fprintf()'s result.
 */
int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	hist_entry__snprintf(he, bf, size, hists, pair_hists,
			     show_displacement, displacement,
			     true, session_total);

	return fprintf(fp, "%s\n", bf);
}
  682. static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
  683. struct hists *hists, FILE *fp,
  684. u64 session_total)
  685. {
  686. int left_margin = 0;
  687. if (sort__first_dimension == SORT_COMM) {
  688. struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
  689. typeof(*se), list);
  690. left_margin = hists__col_len(hists, se->se_width_idx);
  691. left_margin -= thread__comm_len(self->thread);
  692. }
  693. return hist_entry_callchain__fprintf(fp, self, session_total,
  694. left_margin);
  695. }
/*
 * Print the whole histogram: an optional header (column titles plus a
 * dotted underline), then every unfiltered entry, honoring @max_rows /
 * @max_cols limits.  When diffing (@pair) an extra Delta column — and,
 * with @show_displacement, a rank-displacement column — is printed.
 * Returns the accumulated char count.
 *
 * NOTE(review): the return-value accounting is inconsistent — several
 * header fprintf() calls (the leading "# ...", the Samples column, the
 * dotted separator lines) are not added to @ret while others are.
 * Callers appear to use the value loosely; left as-is here.
 */
size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/* column titles for each active sort key */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			/* user-supplied comma-separated width overrides */
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	/* dotted underline matching each column's width */
	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			/* how far the entry moved vs its baseline rank */
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, fp, hists->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, fp,
							     hists->stats.total_period);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			/* debug aid: dump the thread's maps for unresolved ips */
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}
  837. /*
  838. * See hists__fprintf to match the column widths
  839. */
  840. unsigned int hists__sort_list_width(struct hists *hists)
  841. {
  842. struct sort_entry *se;
  843. int ret = 9; /* total % */
  844. if (symbol_conf.show_cpu_utilization) {
  845. ret += 7; /* count_sys % */
  846. ret += 6; /* count_us % */
  847. if (perf_guest) {
  848. ret += 13; /* count_guest_sys % */
  849. ret += 12; /* count_guest_us % */
  850. }
  851. }
  852. if (symbol_conf.show_nr_samples)
  853. ret += 11;
  854. if (symbol_conf.show_total_period)
  855. ret += 13;
  856. list_for_each_entry(se, &hist_entry__sort_list, list)
  857. if (!se->elide)
  858. ret += 2 + hists__col_len(hists, se->se_width_idx);
  859. if (verbose) /* Addr + origin */
  860. ret += 3 + BITS_PER_LONG / 4;
  861. return ret;
  862. }
  863. static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
  864. enum hist_filter filter)
  865. {
  866. h->filtered &= ~(1 << filter);
  867. if (h->filtered)
  868. return;
  869. ++hists->nr_entries;
  870. if (h->ms.unfolded)
  871. hists->nr_entries += h->nr_rows;
  872. h->row_offset = 0;
  873. hists->stats.total_period += h->period;
  874. hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
  875. hists__calc_col_len(hists, h);
  876. }
  877. void hists__filter_by_dso(struct hists *hists, const struct dso *dso)
  878. {
  879. struct rb_node *nd;
  880. hists->nr_entries = hists->stats.total_period = 0;
  881. hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
  882. hists__reset_col_len(hists);
  883. for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
  884. struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
  885. if (symbol_conf.exclude_other && !h->parent)
  886. continue;
  887. if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
  888. h->filtered |= (1 << HIST_FILTER__DSO);
  889. continue;
  890. }
  891. hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
  892. }
  893. }
  894. void hists__filter_by_thread(struct hists *hists, const struct thread *thread)
  895. {
  896. struct rb_node *nd;
  897. hists->nr_entries = hists->stats.total_period = 0;
  898. hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
  899. hists__reset_col_len(hists);
  900. for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
  901. struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
  902. if (thread != NULL && h->thread != thread) {
  903. h->filtered |= (1 << HIST_FILTER__THREAD);
  904. continue;
  905. }
  906. hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
  907. }
  908. }
/* Thin wrapper: record a sample at @ip against the entry's symbol/map. */
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
/* Thin wrapper: disassemble/annotate the entry's symbol. */
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
/* Count one event of @type; slot 0 doubles as the all-events total. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}
  922. size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
  923. {
  924. int i;
  925. size_t ret = 0;
  926. for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
  927. const char *name;
  928. if (hists->stats.nr_events[i] == 0)
  929. continue;
  930. name = perf_event__name(i);
  931. if (!strcmp(name, "UNKNOWN"))
  932. continue;
  933. ret += fprintf(fp, "%16s events: %10d\n", name,
  934. hists->stats.nr_events[i]);
  935. }
  936. return ret;
  937. }
  938. void hists__init(struct hists *hists)
  939. {
  940. memset(hists, 0, sizeof(*hists));
  941. hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
  942. hists->entries_in = &hists->entries_in_array[0];
  943. hists->entries_collapsed = RB_ROOT;
  944. hists->entries = RB_ROOT;
  945. pthread_mutex_init(&hists->lock, NULL);
  946. }