hist.c

#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
        else
                hists__set_unres_dso_col_len(hists, HISTC_DSO);

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->branch_info) {
                int symlen;
                /*
                 * +4 accounts for the '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}
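
/*
 * Attribute @period to the bucket matching the sample's cpumode:
 * kernel, user, guest kernel or guest user time for this entry.
 */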
static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->period_guest_us += period;
                break;
        default:
                break;
        }
}
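
/*
 * Exponential decay: on every pass each entry keeps 7/8 of its period and
 * event count, so entries that stop getting samples gradually fade away.
 */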
static void hist_entry__decay(struct hist_entry *he)
{
        he->period = (he->period * 7) / 8;
        he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->period;

        return he->period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when the
                 * user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */
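
/*
 * The callchain_root is carved out of extra space malloc'ed past the end
 * of the hist_entry, so it only costs memory when callchains are enabled.
 */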
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ?
                                sizeof(struct callchain_root) : 0;
        struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;
                he->nr_events = 1;
                if (he->ms.map)
                        he->ms.map->referenced = true;
                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);
        }

        return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}
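
/*
 * Look the template entry up in the current input tree (hists->entries_in):
 * if an equal entry already exists, aggregate the period into it, otherwise
 * allocate a new hist_entry and link it in.  The tree walk and insertion
 * happen under hists->lock.
 */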
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__cmp(entry, he);

                if (!cmp) {
                        he->period += period;
                        ++he->nr_events;

                        /*
                         * If the map of an existing hist_entry has become
                         * out-of-date due to an exec() or similar, update it.
                         * Otherwise we will mis-adjust symbol addresses when
                         * computing the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}
struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = bi->to.map,
                        .sym = bi->to.sym,
                },
                .cpu      = al->cpu,
                .ip       = bi->to.addr,
                .level    = al->level,
                .period   = period,
                .parent   = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
                .hists    = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu      = al->cpu,
                .ip       = al->addr,
                .level    = al->level,
                .period   = period,
                .parent   = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists    = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
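
/*
 * Like hist_entry__cmp(), but used when collapsing entries: a sort entry
 * may provide a dedicated ->se_collapse() comparison, and we fall back to
 * ->se_cmp() when it does not.
 */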
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->period += he->period;
                        iter->period_sys += he->period_sys;
                        iter->period_us += he->period_us;
                        iter->period_guest_sys += he->period_guest_sys;
                        iter->period_guest_us += he->period_guest_us;
                        iter->nr_events += he->nr_events;

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}
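
/*
 * hists->entries_in points into a two element array: grab the tree that
 * currently holds the incoming entries and rotate to the other element, so
 * new entries can keep being added while this tree is collapsed.
 */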
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries
                         * already collapsed, we need to apply the filters
                         * that may have been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->period > iter->period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}
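
/*
 * Rebuild hists->entries sorted by period, biggest first, recomputing
 * nr_entries, total_period and the column widths along the way.
 * min_callchain_hits is the threshold passed down when sorting each
 * entry's callchain.
 */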
static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period *
                             (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}
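
/*
 * Clear @filter from the entry's filter mask; once no filter bits remain,
 * count the entry again in nr_entries, total_period, the sample count and
 * the column width calculation.
 */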
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

        hists__calc_col_len(hists, h);
}
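
/*
 * The hists__filter_entry_by_*() helpers set the matching filter bit on
 * entries that do not pass the current filter and return true; the
 * hists__filter_by_*() walkers below then recompute the totals from the
 * entries that remain visible.
 */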
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
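
/*
 * nr_events[0] doubles as the running total across all record types, on
 * top of the per-type count kept in nr_events[type].
 */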
void hists__inc_nr_events(struct hists *hists, u32 type)
{
        ++hists->stats.nr_events[0];
        ++hists->stats.nr_events[type];
}