hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}
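/*
 * Grow-only update: widen @col to @len if it is currently narrower.
 * Returns true when the width actually changed, so callers can react
 * (see hists__calc_col_len()).
 */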
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}
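/*
 * Unresolved entries print as a raw hex address, so the column needs
 * BITS_PER_LONG / 4 characters (one per hex digit of a long), unless
 * the user pinned column widths or is filtering by DSO anyway.
 */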
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
        else
                hists__set_unres_dso_col_len(hists, HISTC_DSO);

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                int symlen;
                /*
                 * +4 accounts for the '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}
static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
        he_stat->period += period;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period           += src->period;
        dest->period_sys       += src->period_sys;
        dest->period_us        += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us  += src->period_guest_us;
        dest->nr_events        += src->nr_events;
}
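/*
 * Exponential decay, used by the live modes (e.g. perf top): each pass
 * scales an entry down to 7/8 of its previous weight, so entries that
 * stop getting samples fade out and are eventually pruned.
 */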
static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it
                 * here in case it gets new samples; we'll eventually
                 * free it when the user stops browsing and it again
                 * gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}
/*
 * histogram, sorted on item, collects periods
 */
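/*
 * struct hist_entry ends in a zero-length callchain array, which is
 * why the callchain root only costs memory when callchains are in
 * use: the allocation below simply appends it to the entry.
 */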
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ?
                                sizeof(struct callchain_root) : 0;
        struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}
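/*
 * The two entry points below build a template hist_entry on the stack
 * and hand it to add_hist_entry(), which either merges the period into
 * an existing bucket or clones the template into a new one.
 */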
struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = bi->to.map,
                        .sym = bi->to.sym,
                },
                .cpu    = al->cpu,
                .ip     = bi->to.addr,
                .level  = al->level,
                .stat = {
                        .period    = period,
                        .nr_events = 1,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
                .hists  = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .period    = period,
                        .nr_events = 1,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists  = self,
        };

        return add_hist_entry(self, &entry, al, period);
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
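/*
 * Like hist_entry__cmp(), but prefers a sort entry's ->se_collapse
 * method when one exists ('x ?: y' is the GNU shorthand for
 * 'x ? x : y', i.e. fall back to ->se_cmp).
 */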
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}
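/*
 * hists->entries_in_array[2] double-buffers incoming entries: grab the
 * buffer to be collapsed and flip entries_in to the other one under
 * hists->lock, so a threaded producer can keep inserting while the
 * resort runs.
 */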
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}
/*
 * reverse the map, sort on period.
 */
static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}
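/*
 * With event groups, a tie on the leader's period is broken by
 * comparing the group members' periods in group order; the member
 * periods are collected from each entry's ->pairs list, indexed by
 * the evsel's position in the group.
 */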
static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;

        /* sizeof(*periods_a), not sizeof(periods_a): we need room for
         * nr_members u64s, not nr_members pointers. */
        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}
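/*
 * Rebuild hists->entries sorted by period, descending.
 * min_callchain_hits turns callchain_param.min_percent into an
 * absolute period threshold for pruning callchains along the way.
 */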
static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}
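/*
 * Clear @filter's bit on @h; once no filter bits remain, account the
 * entry back into the totals and column widths.
 */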
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
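/* nr_events[0] doubles as the running total across all record types. */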
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
        }
out:
        return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}
/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, add a dummy entry to the leader hists with
 * period = 0 and nr_events = 0 to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}