hist.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029
  1. #include "hist.h"
  2. #include "session.h"
  3. #include "sort.h"
  4. #include <math.h>
/*
 * Global callchain rendering parameters: relative graph mode, with
 * chains below 0.5% of the parent's hits filtered out.
 */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};
  9. static void hist_entry__add_cpumode_count(struct hist_entry *self,
  10. unsigned int cpumode, u64 count)
  11. {
  12. switch (cpumode) {
  13. case PERF_RECORD_MISC_KERNEL:
  14. self->count_sys += count;
  15. break;
  16. case PERF_RECORD_MISC_USER:
  17. self->count_us += count;
  18. break;
  19. case PERF_RECORD_MISC_GUEST_KERNEL:
  20. self->count_guest_sys += count;
  21. break;
  22. case PERF_RECORD_MISC_GUEST_USER:
  23. self->count_guest_us += count;
  24. break;
  25. default:
  26. break;
  27. }
  28. }
  29. /*
  30. * histogram, sorted on item, collects counts
  31. */
  32. static struct hist_entry *hist_entry__new(struct hist_entry *template)
  33. {
  34. size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
  35. struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
  36. if (self != NULL) {
  37. *self = *template;
  38. if (symbol_conf.use_callchain)
  39. callchain_init(self->callchain);
  40. }
  41. return self;
  42. }
  43. static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
  44. {
  45. if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
  46. self->max_sym_namelen = entry->ms.sym->namelen;
  47. ++self->nr_entries;
  48. }
  49. struct hist_entry *__hists__add_entry(struct hists *self,
  50. struct addr_location *al,
  51. struct symbol *sym_parent, u64 count)
  52. {
  53. struct rb_node **p = &self->entries.rb_node;
  54. struct rb_node *parent = NULL;
  55. struct hist_entry *he;
  56. struct hist_entry entry = {
  57. .thread = al->thread,
  58. .ms = {
  59. .map = al->map,
  60. .sym = al->sym,
  61. },
  62. .ip = al->addr,
  63. .level = al->level,
  64. .count = count,
  65. .parent = sym_parent,
  66. };
  67. int cmp;
  68. while (*p != NULL) {
  69. parent = *p;
  70. he = rb_entry(parent, struct hist_entry, rb_node);
  71. cmp = hist_entry__cmp(&entry, he);
  72. if (!cmp) {
  73. he->count += count;
  74. goto out;
  75. }
  76. if (cmp < 0)
  77. p = &(*p)->rb_left;
  78. else
  79. p = &(*p)->rb_right;
  80. }
  81. he = hist_entry__new(&entry);
  82. if (!he)
  83. return NULL;
  84. rb_link_node(&he->rb_node, parent, p);
  85. rb_insert_color(&he->rb_node, &self->entries);
  86. hists__inc_nr_entries(self, he);
  87. out:
  88. hist_entry__add_cpumode_count(he, al->cpumode, count);
  89. return he;
  90. }
  91. int64_t
  92. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  93. {
  94. struct sort_entry *se;
  95. int64_t cmp = 0;
  96. list_for_each_entry(se, &hist_entry__sort_list, list) {
  97. cmp = se->se_cmp(left, right);
  98. if (cmp)
  99. break;
  100. }
  101. return cmp;
  102. }
  103. int64_t
  104. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  105. {
  106. struct sort_entry *se;
  107. int64_t cmp = 0;
  108. list_for_each_entry(se, &hist_entry__sort_list, list) {
  109. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  110. f = se->se_collapse ?: se->se_cmp;
  111. cmp = f(left, right);
  112. if (cmp)
  113. break;
  114. }
  115. return cmp;
  116. }
/* Release a hist_entry allocated by hist_entry__new(). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
  121. /*
  122. * collapse the histogram
  123. */
  124. static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
  125. {
  126. struct rb_node **p = &root->rb_node;
  127. struct rb_node *parent = NULL;
  128. struct hist_entry *iter;
  129. int64_t cmp;
  130. while (*p != NULL) {
  131. parent = *p;
  132. iter = rb_entry(parent, struct hist_entry, rb_node);
  133. cmp = hist_entry__collapse(iter, he);
  134. if (!cmp) {
  135. iter->count += he->count;
  136. hist_entry__free(he);
  137. return false;
  138. }
  139. if (cmp < 0)
  140. p = &(*p)->rb_left;
  141. else
  142. p = &(*p)->rb_right;
  143. }
  144. rb_link_node(&he->rb_node, parent, p);
  145. rb_insert_color(&he->rb_node, root);
  146. return true;
  147. }
  148. void hists__collapse_resort(struct hists *self)
  149. {
  150. struct rb_root tmp;
  151. struct rb_node *next;
  152. struct hist_entry *n;
  153. if (!sort__need_collapse)
  154. return;
  155. tmp = RB_ROOT;
  156. next = rb_first(&self->entries);
  157. self->nr_entries = 0;
  158. self->max_sym_namelen = 0;
  159. while (next) {
  160. n = rb_entry(next, struct hist_entry, rb_node);
  161. next = rb_next(&n->rb_node);
  162. rb_erase(&n->rb_node, &self->entries);
  163. if (collapse__insert_entry(&tmp, n))
  164. hists__inc_nr_entries(self, n);
  165. }
  166. self->entries = tmp;
  167. }
  168. /*
  169. * reverse the map, sort on count.
  170. */
  171. static void __hists__insert_output_entry(struct rb_root *entries,
  172. struct hist_entry *he,
  173. u64 min_callchain_hits)
  174. {
  175. struct rb_node **p = &entries->rb_node;
  176. struct rb_node *parent = NULL;
  177. struct hist_entry *iter;
  178. if (symbol_conf.use_callchain)
  179. callchain_param.sort(&he->sorted_chain, he->callchain,
  180. min_callchain_hits, &callchain_param);
  181. while (*p != NULL) {
  182. parent = *p;
  183. iter = rb_entry(parent, struct hist_entry, rb_node);
  184. if (he->count > iter->count)
  185. p = &(*p)->rb_left;
  186. else
  187. p = &(*p)->rb_right;
  188. }
  189. rb_link_node(&he->rb_node, parent, p);
  190. rb_insert_color(&he->rb_node, entries);
  191. }
  192. void hists__output_resort(struct hists *self)
  193. {
  194. struct rb_root tmp;
  195. struct rb_node *next;
  196. struct hist_entry *n;
  197. u64 min_callchain_hits;
  198. min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);
  199. tmp = RB_ROOT;
  200. next = rb_first(&self->entries);
  201. self->nr_entries = 0;
  202. self->max_sym_namelen = 0;
  203. while (next) {
  204. n = rb_entry(next, struct hist_entry, rb_node);
  205. next = rb_next(&n->rb_node);
  206. rb_erase(&n->rb_node, &self->entries);
  207. __hists__insert_output_entry(&tmp, n, min_callchain_hits);
  208. hists__inc_nr_entries(self, n);
  209. }
  210. self->entries = tmp;
  211. }
  212. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  213. {
  214. int i;
  215. int ret = fprintf(fp, " ");
  216. for (i = 0; i < left_margin; i++)
  217. ret += fprintf(fp, " ");
  218. return ret;
  219. }
  220. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  221. int left_margin)
  222. {
  223. int i;
  224. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  225. for (i = 0; i < depth; i++)
  226. if (depth_mask & (1 << i))
  227. ret += fprintf(fp, "| ");
  228. else
  229. ret += fprintf(fp, " ");
  230. ret += fprintf(fp, "\n");
  231. return ret;
  232. }
/*
 * Print a single callchain entry line: the left margin, the '|' links
 * for the levels active in @depth_mask, the branch percentage on the
 * first line of a new branch (@count == 0, at the deepest level),
 * then the symbol name (or raw ip when unresolved).
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int count,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			double percent;

			/* Share of this branch relative to @total_samples. */
			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
  259. static struct symbol *rem_sq_bracket;
  260. static struct callchain_list rem_hits;
  261. static void init_rem_hits(void)
  262. {
  263. rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
  264. if (!rem_sq_bracket) {
  265. fprintf(stderr, "Not enough memory to display remaining hits\n");
  266. return;
  267. }
  268. strcpy(rem_sq_bracket->name, "[...]");
  269. rem_hits.ms.sym = rem_sq_bracket;
  270. }
/*
 * Recursively print the children of @self as an ASCII callgraph.
 * @depth_mask records which levels still need a '|' link drawn;
 * percentages are computed against @total_samples, or against this
 * node's children_hit in relative mode. In relative mode, hits that
 * were filtered out of the children are rendered as a trailing
 * "[...]" pseudo entry. Returns the number of characters written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	/* Relative mode: percentages are against this node's children. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {
		/* Filtered-out hits become a "[...]" pseudo entry. */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
/*
 * Print the top-level callchain for one hist entry: the entry's own
 * frames first (skipping the leading frame when sorting by symbol,
 * since it duplicates the entry line itself), then the child graph.
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		/* First frame is the sort symbol: don't repeat it. */
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			/* Open the graph with the "|" / "---" connector. */
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			/* Indent subsequent lines past the connector. */
			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
  368. static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
  369. u64 total_samples)
  370. {
  371. struct callchain_list *chain;
  372. size_t ret = 0;
  373. if (!self)
  374. return 0;
  375. ret += callchain__fprintf_flat(fp, self->parent, total_samples);
  376. list_for_each_entry(chain, &self->val, list) {
  377. if (chain->ip >= PERF_CONTEXT_MAX)
  378. continue;
  379. if (chain->ms.sym)
  380. ret += fprintf(fp, " %s\n", chain->ms.sym->name);
  381. else
  382. ret += fprintf(fp, " %p\n",
  383. (void *)(long)chain->ip);
  384. }
  385. return ret;
  386. }
  387. static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
  388. u64 total_samples, int left_margin)
  389. {
  390. struct rb_node *rb_node;
  391. struct callchain_node *chain;
  392. size_t ret = 0;
  393. u32 entries_printed = 0;
  394. rb_node = rb_first(&self->sorted_chain);
  395. while (rb_node) {
  396. double percent;
  397. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  398. percent = chain->hit * 100.0 / total_samples;
  399. switch (callchain_param.mode) {
  400. case CHAIN_FLAT:
  401. ret += percent_color_fprintf(fp, " %6.2f%%\n",
  402. percent);
  403. ret += callchain__fprintf_flat(fp, chain, total_samples);
  404. break;
  405. case CHAIN_GRAPH_ABS: /* Falldown */
  406. case CHAIN_GRAPH_REL:
  407. ret += callchain__fprintf_graph(fp, chain, total_samples,
  408. left_margin);
  409. case CHAIN_NONE:
  410. default:
  411. break;
  412. }
  413. ret += fprintf(fp, "\n");
  414. if (++entries_printed == callchain_param.print_limit)
  415. break;
  416. rb_node = rb_next(rb_node);
  417. }
  418. return ret;
  419. }
/*
 * Format one histogram line for @self into @s (at most @size bytes):
 * overhead percentage (or raw count when the total is 0), optional
 * sample count and cpu-utilization breakdown, baseline delta and
 * displacement when diffing against @pair_hists, then every
 * non-elided sort column. Returns the number of characters written,
 * or 0 when the entry is excluded (exclude_other without a parent).
 *
 * NOTE(review): intermediate results are chained as "s + ret,
 * size - ret"; if ret ever reached size, size - ret would wrap
 * (size_t). Callers appear to pass generously sized buffers — verify.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 count, total, count_sys, count_us, count_guest_sys, count_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		/* Diff mode: the leading columns show the baseline numbers. */
		count = self->pair ? self->pair->count : 0;
		total = pair_hists->stats.total;
		count_sys = self->pair ? self->pair->count_sys : 0;
		count_us = self->pair ? self->pair->count_us : 0;
		count_guest_sys = self->pair ? self->pair->count_guest_sys : 0;
		count_guest_us = self->pair ? self->pair->count_guest_us : 0;
	} else {
		count = self->count;
		total = session_total;
		count_sys = self->count_sys;
		count_us = self->count_us;
		count_guest_sys = self->count_guest_sys;
		count_guest_us = self->count_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (count * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (count * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_us * 100.0) /
							      total);
			}
		}
	} else
		/* No total available: print the raw count instead. */
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", count);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, count);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", count);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (count * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->count * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* Only deltas of at least 0.01% are worth printing. */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       se->se_width ? *se->se_width : 0);
	}

	return ret;
}
  517. int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
  518. bool show_displacement, long displacement, FILE *fp,
  519. u64 session_total)
  520. {
  521. char bf[512];
  522. hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
  523. show_displacement, displacement,
  524. true, session_total);
  525. return fprintf(fp, "%s\n", bf);
  526. }
/*
 * Print @self's callchains, computing a left margin so graphs line
 * up under the comm column when sorting by comm.
 */
static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = se->se_width ? *se->se_width : 0;
		/* NOTE(review): can go negative for long comm names — the
		 * margin helpers then print no padding; confirm intended. */
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}
  540. size_t hists__fprintf(struct hists *self, struct hists *pair,
  541. bool show_displacement, FILE *fp)
  542. {
  543. struct sort_entry *se;
  544. struct rb_node *nd;
  545. size_t ret = 0;
  546. unsigned long position = 1;
  547. long displacement = 0;
  548. unsigned int width;
  549. const char *sep = symbol_conf.field_sep;
  550. char *col_width = symbol_conf.col_width_list_str;
  551. init_rem_hits();
  552. fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
  553. if (symbol_conf.show_nr_samples) {
  554. if (sep)
  555. fprintf(fp, "%cSamples", *sep);
  556. else
  557. fputs(" Samples ", fp);
  558. }
  559. if (symbol_conf.show_cpu_utilization) {
  560. if (sep) {
  561. ret += fprintf(fp, "%csys", *sep);
  562. ret += fprintf(fp, "%cus", *sep);
  563. if (perf_guest) {
  564. ret += fprintf(fp, "%cguest sys", *sep);
  565. ret += fprintf(fp, "%cguest us", *sep);
  566. }
  567. } else {
  568. ret += fprintf(fp, " sys ");
  569. ret += fprintf(fp, " us ");
  570. if (perf_guest) {
  571. ret += fprintf(fp, " guest sys ");
  572. ret += fprintf(fp, " guest us ");
  573. }
  574. }
  575. }
  576. if (pair) {
  577. if (sep)
  578. ret += fprintf(fp, "%cDelta", *sep);
  579. else
  580. ret += fprintf(fp, " Delta ");
  581. if (show_displacement) {
  582. if (sep)
  583. ret += fprintf(fp, "%cDisplacement", *sep);
  584. else
  585. ret += fprintf(fp, " Displ");
  586. }
  587. }
  588. list_for_each_entry(se, &hist_entry__sort_list, list) {
  589. if (se->elide)
  590. continue;
  591. if (sep) {
  592. fprintf(fp, "%c%s", *sep, se->se_header);
  593. continue;
  594. }
  595. width = strlen(se->se_header);
  596. if (se->se_width) {
  597. if (symbol_conf.col_width_list_str) {
  598. if (col_width) {
  599. *se->se_width = atoi(col_width);
  600. col_width = strchr(col_width, ',');
  601. if (col_width)
  602. ++col_width;
  603. }
  604. }
  605. width = *se->se_width = max(*se->se_width, width);
  606. }
  607. fprintf(fp, " %*s", width, se->se_header);
  608. }
  609. fprintf(fp, "\n");
  610. if (sep)
  611. goto print_entries;
  612. fprintf(fp, "# ........");
  613. if (symbol_conf.show_nr_samples)
  614. fprintf(fp, " ..........");
  615. if (pair) {
  616. fprintf(fp, " ..........");
  617. if (show_displacement)
  618. fprintf(fp, " .....");
  619. }
  620. list_for_each_entry(se, &hist_entry__sort_list, list) {
  621. unsigned int i;
  622. if (se->elide)
  623. continue;
  624. fprintf(fp, " ");
  625. if (se->se_width)
  626. width = *se->se_width;
  627. else
  628. width = strlen(se->se_header);
  629. for (i = 0; i < width; i++)
  630. fprintf(fp, ".");
  631. }
  632. fprintf(fp, "\n#\n");
  633. print_entries:
  634. for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
  635. struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
  636. if (show_displacement) {
  637. if (h->pair != NULL)
  638. displacement = ((long)h->pair->position -
  639. (long)position);
  640. else
  641. displacement = 0;
  642. ++position;
  643. }
  644. ret += hist_entry__fprintf(h, pair, show_displacement,
  645. displacement, fp, self->stats.total);
  646. if (symbol_conf.use_callchain)
  647. ret += hist_entry__fprintf_callchain(h, fp, self->stats.total);
  648. if (h->ms.map == NULL && verbose > 1) {
  649. __map_groups__fprintf_maps(&h->thread->mg,
  650. MAP__FUNCTION, verbose, fp);
  651. fprintf(fp, "%.10s end\n", graph_dotted_line);
  652. }
  653. }
  654. free(rem_sq_bracket);
  655. return ret;
  656. }
/* Bit indices for hist_entry->filtered: which filter hid an entry. */
enum hist_filter {
	HIST_FILTER__DSO,	/* set by hists__filter_by_dso() */
	HIST_FILTER__THREAD,	/* set by hists__filter_by_thread() */
};
  661. void hists__filter_by_dso(struct hists *self, const struct dso *dso)
  662. {
  663. struct rb_node *nd;
  664. self->nr_entries = self->stats.total = 0;
  665. self->max_sym_namelen = 0;
  666. for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
  667. struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
  668. if (symbol_conf.exclude_other && !h->parent)
  669. continue;
  670. if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
  671. h->filtered |= (1 << HIST_FILTER__DSO);
  672. continue;
  673. }
  674. h->filtered &= ~(1 << HIST_FILTER__DSO);
  675. if (!h->filtered) {
  676. ++self->nr_entries;
  677. self->stats.total += h->count;
  678. if (h->ms.sym &&
  679. self->max_sym_namelen < h->ms.sym->namelen)
  680. self->max_sym_namelen = h->ms.sym->namelen;
  681. }
  682. }
  683. }
  684. void hists__filter_by_thread(struct hists *self, const struct thread *thread)
  685. {
  686. struct rb_node *nd;
  687. self->nr_entries = self->stats.total = 0;
  688. self->max_sym_namelen = 0;
  689. for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
  690. struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
  691. if (thread != NULL && h->thread != thread) {
  692. h->filtered |= (1 << HIST_FILTER__THREAD);
  693. continue;
  694. }
  695. h->filtered &= ~(1 << HIST_FILTER__THREAD);
  696. if (!h->filtered) {
  697. ++self->nr_entries;
  698. self->stats.total += h->count;
  699. if (h->ms.sym &&
  700. self->max_sym_namelen < h->ms.sym->namelen)
  701. self->max_sym_namelen = h->ms.sym->namelen;
  702. }
  703. }
  704. }
  705. static int symbol__alloc_hist(struct symbol *self)
  706. {
  707. struct sym_priv *priv = symbol__priv(self);
  708. const int size = (sizeof(*priv->hist) +
  709. (self->end - self->start) * sizeof(u64));
  710. priv->hist = zalloc(size);
  711. return priv->hist == NULL ? -1 : 0;
  712. }
/*
 * Record one sample hit at @ip in the per-symbol address histogram,
 * allocating that histogram lazily on first use.
 * Returns 0 (including for unresolved entries and out-of-range ips),
 * or -ENOMEM when the lazy allocation fails.
 */
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = self->ms.sym;
	struct sym_priv *priv;
	struct sym_hist *h;

	/* Nothing to account without a resolved symbol and map. */
	if (!sym || !self->ms.map)
		return 0;

	priv = symbol__priv(sym);
	if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
		return -ENOMEM;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

	/* @ip may fall outside the symbol; silently ignore such hits. */
	if (offset >= sym_size)
		return 0;

	h = priv->hist;
	h->sum++;
	h->ip[offset]++;

	pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
	return 0;
}
  736. static struct objdump_line *objdump_line__new(s64 offset, char *line)
  737. {
  738. struct objdump_line *self = malloc(sizeof(*self));
  739. if (self != NULL) {
  740. self->offset = offset;
  741. self->line = line;
  742. }
  743. return self;
  744. }
  745. void objdump_line__free(struct objdump_line *self)
  746. {
  747. free(self->line);
  748. free(self);
  749. }
/* Append @line to the tail of the parsed-disassembly list @head. */
static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
	list_add_tail(&line->node, head);
}
/*
 * Starting after @pos, return the next parsed line carrying a valid
 * (non-negative) offset, or NULL when none remains in @head.
 */
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
  762. static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
  763. struct list_head *head)
  764. {
  765. struct symbol *sym = self->ms.sym;
  766. struct objdump_line *objdump_line;
  767. char *line = NULL, *tmp, *tmp2, *c;
  768. size_t line_len;
  769. s64 line_ip, offset = -1;
  770. if (getline(&line, &line_len, file) < 0)
  771. return -1;
  772. if (!line)
  773. return -1;
  774. while (line_len != 0 && isspace(line[line_len - 1]))
  775. line[--line_len] = '\0';
  776. c = strchr(line, '\n');
  777. if (c)
  778. *c = 0;
  779. line_ip = -1;
  780. /*
  781. * Strip leading spaces:
  782. */
  783. tmp = line;
  784. while (*tmp) {
  785. if (*tmp != ' ')
  786. break;
  787. tmp++;
  788. }
  789. if (*tmp) {
  790. /*
  791. * Parse hexa addresses followed by ':'
  792. */
  793. line_ip = strtoull(tmp, &tmp2, 16);
  794. if (*tmp2 != ':')
  795. line_ip = -1;
  796. }
  797. if (line_ip != -1) {
  798. u64 start = map__rip_2objdump(self->ms.map, sym->start);
  799. offset = line_ip - start;
  800. }
  801. objdump_line = objdump_line__new(offset, line);
  802. if (objdump_line == NULL) {
  803. free(line);
  804. return -1;
  805. }
  806. objdump__add_line(head, objdump_line);
  807. return 0;
  808. }
  809. int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
  810. {
  811. struct symbol *sym = self->ms.sym;
  812. struct map *map = self->ms.map;
  813. struct dso *dso = map->dso;
  814. const char *filename = dso->long_name;
  815. char command[PATH_MAX * 2];
  816. FILE *file;
  817. u64 len;
  818. if (!filename)
  819. return -1;
  820. if (dso->origin == DSO__ORIG_KERNEL) {
  821. if (dso->annotate_warned)
  822. return 0;
  823. dso->annotate_warned = 1;
  824. pr_err("Can't annotate %s: No vmlinux file was found in the "
  825. "path:\n", sym->name);
  826. vmlinux_path__fprintf(stderr);
  827. return -1;
  828. }
  829. pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
  830. filename, sym->name, map->unmap_ip(map, sym->start),
  831. map->unmap_ip(map, sym->end));
  832. len = sym->end - sym->start;
  833. pr_debug("annotating [%p] %30s : [%p] %30s\n",
  834. dso, dso->long_name, sym, sym->name);
  835. snprintf(command, sizeof(command),
  836. "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
  837. map__rip_2objdump(map, sym->start),
  838. map__rip_2objdump(map, sym->end),
  839. filename, filename);
  840. pr_debug("Executing: %s\n", command);
  841. file = popen(command, "r");
  842. if (!file)
  843. return -1;
  844. while (!feof(file))
  845. if (hist_entry__parse_objdump_line(self, file, head) < 0)
  846. break;
  847. pclose(file);
  848. return 0;
  849. }