/* hist.c — perf histogram utilities (hist_entry accounting, sorting, printing, annotation) */
  1. #include "util.h"
  2. #include "hist.h"
  3. #include "session.h"
  4. #include "sort.h"
  5. #include <math.h>
/*
 * Global callchain rendering parameters: relative graph mode, with
 * branches below 0.5% of the parent's hits filtered out.
 */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};
  10. static void hist_entry__add_cpumode_period(struct hist_entry *self,
  11. unsigned int cpumode, u64 period)
  12. {
  13. switch (cpumode) {
  14. case PERF_RECORD_MISC_KERNEL:
  15. self->period_sys += period;
  16. break;
  17. case PERF_RECORD_MISC_USER:
  18. self->period_us += period;
  19. break;
  20. case PERF_RECORD_MISC_GUEST_KERNEL:
  21. self->period_guest_sys += period;
  22. break;
  23. case PERF_RECORD_MISC_GUEST_USER:
  24. self->period_guest_us += period;
  25. break;
  26. default:
  27. break;
  28. }
  29. }
  30. /*
  31. * histogram, sorted on item, collects periods
  32. */
  33. static struct hist_entry *hist_entry__new(struct hist_entry *template)
  34. {
  35. size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
  36. struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
  37. if (self != NULL) {
  38. *self = *template;
  39. self->nr_events = 1;
  40. if (symbol_conf.use_callchain)
  41. callchain_init(self->callchain);
  42. }
  43. return self;
  44. }
  45. static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
  46. {
  47. if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
  48. self->max_sym_namelen = entry->ms.sym->namelen;
  49. ++self->nr_entries;
  50. }
/*
 * Find the hist_entry matching @al/@sym_parent in @self->entries, or
 * create and insert a new one.  On a key match the existing entry's
 * period and event count are bumped instead of allocating.  Either way
 * the period is also attributed to the cpumode buckets.
 *
 * Returns the (new or existing) entry, or NULL if allocating a new
 * entry failed.
 */
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	/* stack template compared against existing entries; copied on insert */
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
	};
	int cmp;

	/* standard rbtree descent, keyed by hist_entry__cmp() */
	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			/* same key: accumulate into the existing entry */
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}
  94. int64_t
  95. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  96. {
  97. struct sort_entry *se;
  98. int64_t cmp = 0;
  99. list_for_each_entry(se, &hist_entry__sort_list, list) {
  100. cmp = se->se_cmp(left, right);
  101. if (cmp)
  102. break;
  103. }
  104. return cmp;
  105. }
  106. int64_t
  107. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  108. {
  109. struct sort_entry *se;
  110. int64_t cmp = 0;
  111. list_for_each_entry(se, &hist_entry__sort_list, list) {
  112. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  113. f = se->se_collapse ?: se->se_cmp;
  114. cmp = f(left, right);
  115. if (cmp)
  116. break;
  117. }
  118. return cmp;
  119. }
/* Release an entry allocated by hist_entry__new() (callchain storage included). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
  124. /*
  125. * collapse the histogram
  126. */
/*
 * Insert @he into @root keyed by hist_entry__collapse().  If an equal
 * entry already exists, fold @he's period into it, free @he and return
 * false; otherwise link @he into the tree and return true.
 *
 * NOTE(review): only the period is merged on collision — any callchain
 * attached to the discarded entry is dropped; confirm that is intended.
 */
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			/* duplicate under collapse rules: merge and drop */
			iter->period += he->period;
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}
/*
 * Rebuild self->entries with entries that compare equal under the
 * collapse criteria merged together.  No-op unless the active sort
 * keys require collapsing (sort__need_collapse).
 */
void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	/* counters are rebuilt as surviving entries are re-inserted */
	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		/* false means n was merged (and freed) into an equal entry */
		if (collapse__insert_entry(&tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
  171. /*
  172. * reverse the map, sort on period.
  173. */
/*
 * Insert @he into @entries ordered by descending period.  When
 * callchains are enabled, the entry's chain is sorted first with
 * branches below @min_callchain_hits pruned by callchain_param.sort.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* bigger periods go left so rb_first() yields the hottest entry */
		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
  195. void hists__output_resort(struct hists *self)
  196. {
  197. struct rb_root tmp;
  198. struct rb_node *next;
  199. struct hist_entry *n;
  200. u64 min_callchain_hits;
  201. min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
  202. tmp = RB_ROOT;
  203. next = rb_first(&self->entries);
  204. self->nr_entries = 0;
  205. self->max_sym_namelen = 0;
  206. while (next) {
  207. n = rb_entry(next, struct hist_entry, rb_node);
  208. next = rb_next(&n->rb_node);
  209. rb_erase(&n->rb_node, &self->entries);
  210. __hists__insert_output_entry(&tmp, n, min_callchain_hits);
  211. hists__inc_nr_entries(self, n);
  212. }
  213. self->entries = tmp;
  214. }
  215. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  216. {
  217. int i;
  218. int ret = fprintf(fp, " ");
  219. for (i = 0; i < left_margin; i++)
  220. ret += fprintf(fp, " ");
  221. return ret;
  222. }
  223. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  224. int left_margin)
  225. {
  226. int i;
  227. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  228. for (i = 0; i < depth; i++)
  229. if (depth_mask & (1 << i))
  230. ret += fprintf(fp, "| ");
  231. else
  232. ret += fprintf(fp, " ");
  233. ret += fprintf(fp, "\n");
  234. return ret;
  235. }
/*
 * Print one callchain entry line at @depth: '|' rails for the levels
 * still open in @depth_mask, a colored "--xx.xx%--" branch marker on
 * the first line of a new child (@period == 0 at the deepest level),
 * then the symbol name, or the raw ip when the symbol is unknown.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			/* branch point: show this child's share of total */
			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
/* placeholder symbol used to report hits filtered out of a REL graph */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate the "[...]" placeholder symbol.  The extra 6 bytes hold
 * "[...]" plus its NUL terminator — assumes struct symbol ends in a
 * trailing name[] array (TODO confirm against symbol.h).  On failure a
 * warning is printed and rem_sq_bracket stays NULL; callers must cope.
 */
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
/*
 * Recursively print the children of callchain node @self as an ASCII
 * graph.  In CHAIN_GRAPH_REL mode percentages are relative to the
 * parent's children_hit; otherwise they are absolute against
 * @total_samples.  In REL mode, hits pruned below the threshold are
 * summed into a trailing "[...]" pseudo entry.  Output stops after
 * callchain_param.print_limit children per level.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	/* whatever the children don't account for was filtered out */
	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i == 0 marks the branch line that carries the percent */
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {
		/* init_rem_hits() may have failed; then skip the [...] entry */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
/*
 * Print the top of a callchain graph: the root node's own frames
 * preceded by a "|\n ---" connector, then recurse into the children
 * via __callchain__fprintf_graph().  When sorting by symbol first, the
 * first frame is skipped (it duplicates the hist entry's own symbol).
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			/* first visible frame: draw the connector */
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			/* subsequent lines align under the "---" */
			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
  371. static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
  372. u64 total_samples)
  373. {
  374. struct callchain_list *chain;
  375. size_t ret = 0;
  376. if (!self)
  377. return 0;
  378. ret += callchain__fprintf_flat(fp, self->parent, total_samples);
  379. list_for_each_entry(chain, &self->val, list) {
  380. if (chain->ip >= PERF_CONTEXT_MAX)
  381. continue;
  382. if (chain->ms.sym)
  383. ret += fprintf(fp, " %s\n", chain->ms.sym->name);
  384. else
  385. ret += fprintf(fp, " %p\n",
  386. (void *)(long)chain->ip);
  387. }
  388. return ret;
  389. }
  390. static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
  391. u64 total_samples, int left_margin)
  392. {
  393. struct rb_node *rb_node;
  394. struct callchain_node *chain;
  395. size_t ret = 0;
  396. u32 entries_printed = 0;
  397. rb_node = rb_first(&self->sorted_chain);
  398. while (rb_node) {
  399. double percent;
  400. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  401. percent = chain->hit * 100.0 / total_samples;
  402. switch (callchain_param.mode) {
  403. case CHAIN_FLAT:
  404. ret += percent_color_fprintf(fp, " %6.2f%%\n",
  405. percent);
  406. ret += callchain__fprintf_flat(fp, chain, total_samples);
  407. break;
  408. case CHAIN_GRAPH_ABS: /* Falldown */
  409. case CHAIN_GRAPH_REL:
  410. ret += callchain__fprintf_graph(fp, chain, total_samples,
  411. left_margin);
  412. case CHAIN_NONE:
  413. default:
  414. break;
  415. }
  416. ret += fprintf(fp, "\n");
  417. if (++entries_printed == callchain_param.print_limit)
  418. break;
  419. rb_node = rb_next(rb_node);
  420. }
  421. return ret;
  422. }
/*
 * Format one histogram row into @s (at most @size bytes): the overhead
 * percentage (or raw period when @total is 0), then optional columns —
 * cpu utilization split, sample count, baseline delta and displacement
 * when diffing against @pair_hists — and finally one column per active
 * sort key.  Returns the number of characters written; returns 0 and
 * writes nothing for parentless entries when exclude_other is set.
 *
 * NOTE(review): "%lld" is used with u64 values — assumes u64 is
 * unsigned long long on every supported target; confirm.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		/* diff mode: report the baseline (pair) numbers first */
		period = self->pair ? self->pair->period : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* deltas below 0.01% are shown as blank */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	/* one column per active (non-elided) sort key */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       se->se_width ? *se->se_width : 0);
	}

	return ret;
}
  520. int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
  521. bool show_displacement, long displacement, FILE *fp,
  522. u64 session_total)
  523. {
  524. char bf[512];
  525. hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
  526. show_displacement, displacement,
  527. true, session_total);
  528. return fprintf(fp, "%s\n", bf);
  529. }
/*
 * Print self's callchains.  When the first sort dimension is the comm,
 * indent the graph so it lines up under the comm column (the comm's
 * own length is subtracted; negative margins act like zero in
 * callchain__fprintf_left_margin()'s loop).
 */
static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = se->se_width ? *se->se_width : 0;
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}
/*
 * Print the whole histogram to @fp: a header line naming every active
 * column (honoring field_sep and per-column widths), a dotted
 * underline (skipped in separator mode), then one row per entry with
 * optional callchains and, at high verbosity, the maps of entries that
 * resolved to no map.  Returns the number of characters accounted in
 * @ret.
 *
 * NOTE(review): several header fprintf() results (e.g. the leading
 * "# ..." and "Samples" pieces) are not added to ret, so the returned
 * size undercounts what was actually written — confirm whether callers
 * rely on the exact count.
 *
 * NOTE(review): rem_sq_bracket is freed at the end but not reset to
 * NULL, leaving rem_hits.ms.sym dangling until the next init_rem_hits()
 * call — confirm this function is never re-entered concurrently.
 */
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/* column headers for the active sort keys */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (se->se_width) {
			if (symbol_conf.col_width_list_str) {
				/* consume the next entry of the user's width list */
				if (col_width) {
					*se->se_width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			width = *se->se_width = max(*se->se_width, width);
		}
		fprintf(fp, " %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	/* dotted underline matching the header widths */
	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		if (se->se_width)
			width = *se->se_width;
		else
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			/* how far the entry moved relative to the baseline */
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, pair, show_displacement,
					   displacement, fp, self->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, fp, self->stats.total_period);

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}
/* Bit indices for hist_entry->filtered: which filter is hiding an entry. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
};
/*
 * Mark entries whose map's dso differs from @dso as DSO-filtered (a
 * NULL @dso clears the filter) and recompute self's aggregate counters
 * from the entries that remain unfiltered by any filter bit.
 */
void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	self->max_sym_namelen = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		h->filtered &= ~(1 << HIST_FILTER__DSO);
		/* only entries passing every filter count toward the totals */
		if (!h->filtered) {
			++self->nr_entries;
			self->stats.total_period += h->period;
			self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
			if (h->ms.sym &&
			    self->max_sym_namelen < h->ms.sym->namelen)
				self->max_sym_namelen = h->ms.sym->namelen;
		}
	}
}
  689. void hists__filter_by_thread(struct hists *self, const struct thread *thread)
  690. {
  691. struct rb_node *nd;
  692. self->nr_entries = self->stats.total_period = 0;
  693. self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
  694. self->max_sym_namelen = 0;
  695. for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
  696. struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
  697. if (thread != NULL && h->thread != thread) {
  698. h->filtered |= (1 << HIST_FILTER__THREAD);
  699. continue;
  700. }
  701. h->filtered &= ~(1 << HIST_FILTER__THREAD);
  702. if (!h->filtered) {
  703. ++self->nr_entries;
  704. self->stats.total_period += h->period;
  705. self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
  706. if (h->ms.sym &&
  707. self->max_sym_namelen < h->ms.sym->namelen)
  708. self->max_sym_namelen = h->ms.sym->namelen;
  709. }
  710. }
  711. }
  712. static int symbol__alloc_hist(struct symbol *self)
  713. {
  714. struct sym_priv *priv = symbol__priv(self);
  715. const int size = (sizeof(*priv->hist) +
  716. (self->end - self->start) * sizeof(u64));
  717. priv->hist = zalloc(size);
  718. return priv->hist == NULL ? -1 : 0;
  719. }
/*
 * Record one sample at @ip in the annotation histogram of self's
 * symbol, allocating the histogram lazily.  Samples without a symbol
 * or map, or falling outside the symbol's range, are silently dropped
 * (returns 0).  Returns -ENOMEM if the histogram cannot be allocated.
 */
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = self->ms.sym;
	struct sym_priv *priv;
	struct sym_hist *h;

	if (!sym || !self->ms.map)
		return 0;

	priv = symbol__priv(sym);
	if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
		return -ENOMEM;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

	if (offset >= sym_size)
		return 0;

	h = priv->hist;
	h->sum++;
	h->ip[offset]++;

	pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
	return 0;
}
  743. static struct objdump_line *objdump_line__new(s64 offset, char *line)
  744. {
  745. struct objdump_line *self = malloc(sizeof(*self));
  746. if (self != NULL) {
  747. self->offset = offset;
  748. self->line = line;
  749. }
  750. return self;
  751. }
  752. void objdump_line__free(struct objdump_line *self)
  753. {
  754. free(self->line);
  755. free(self);
  756. }
/* Append @line to the tail of the disassembly list @head. */
static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
	list_add_tail(&line->node, head);
}
/*
 * Starting after @pos, return the next line of @head that carries an
 * instruction address (offset >= 0; source lines have offset -1), or
 * NULL when no such line remains.
 */
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
  769. static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
  770. struct list_head *head)
  771. {
  772. struct symbol *sym = self->ms.sym;
  773. struct objdump_line *objdump_line;
  774. char *line = NULL, *tmp, *tmp2, *c;
  775. size_t line_len;
  776. s64 line_ip, offset = -1;
  777. if (getline(&line, &line_len, file) < 0)
  778. return -1;
  779. if (!line)
  780. return -1;
  781. while (line_len != 0 && isspace(line[line_len - 1]))
  782. line[--line_len] = '\0';
  783. c = strchr(line, '\n');
  784. if (c)
  785. *c = 0;
  786. line_ip = -1;
  787. /*
  788. * Strip leading spaces:
  789. */
  790. tmp = line;
  791. while (*tmp) {
  792. if (*tmp != ' ')
  793. break;
  794. tmp++;
  795. }
  796. if (*tmp) {
  797. /*
  798. * Parse hexa addresses followed by ':'
  799. */
  800. line_ip = strtoull(tmp, &tmp2, 16);
  801. if (*tmp2 != ':')
  802. line_ip = -1;
  803. }
  804. if (line_ip != -1) {
  805. u64 start = map__rip_2objdump(self->ms.map, sym->start);
  806. offset = line_ip - start;
  807. }
  808. objdump_line = objdump_line__new(offset, line);
  809. if (objdump_line == NULL) {
  810. free(line);
  811. return -1;
  812. }
  813. objdump__add_line(head, objdump_line);
  814. return 0;
  815. }
/*
 * Disassemble self's symbol by running objdump over the symbol's
 * address range in its dso, collecting the output lines into @head.
 * Kernel symbols without a vmlinux are reported once (annotate_warned)
 * and refused.  Returns 0 on success, -1 on failure.
 *
 * NOTE(review): @filename is interpolated into the shell command
 * without quoting; a dso path containing spaces or shell
 * metacharacters will break the pipeline or execute — consider
 * quoting/escaping it.
 */
int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
{
	struct symbol *sym = self->ms.sym;
	struct map *map = self->ms.map;
	struct dso *dso = map->dso;
	const char *filename = dso->long_name;
	char command[PATH_MAX * 2];
	FILE *file;
	u64 len;

	if (!filename)
		return -1;

	if (dso->origin == DSO__ORIG_KERNEL) {
		if (dso->annotate_warned)
			return 0;
		dso->annotate_warned = 1;
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path:\n", sym->name);
		vmlinux_path__fprintf(stderr);
		return -1;
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
		 filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	len = sym->end - sym->start;

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	/* the grep -v strips lines echoing the file name itself */
	snprintf(command, sizeof(command),
		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 filename, filename);

	pr_debug("Executing: %s\n", command);

	file = popen(command, "r");
	if (!file)
		return -1;

	while (!feof(file))
		if (hist_entry__parse_objdump_line(self, file, head) < 0)
			break;

	pclose(file);
	return 0;
}
  857. void hists__inc_nr_events(struct hists *self, u32 type)
  858. {
  859. ++self->stats.nr_events[0];
  860. ++self->stats.nr_events[type];
  861. }
  862. size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
  863. {
  864. int i;
  865. size_t ret = 0;
  866. for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
  867. if (!event__name[i])
  868. continue;
  869. ret += fprintf(fp, "%10s events: %10d\n",
  870. event__name[i], self->stats.nr_events[i]);
  871. }
  872. return ret;
  873. }