hist.c

#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE
};
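
/*
 * These defaults mean call graphs are rendered with per-branch percentages
 * relative to the parent (CHAIN_GRAPH_REL), callees listed under their
 * callers (ORDER_CALLEE), and branches below 0.5% of the hits pruned:
 * min_percent feeds the min_callchain_hits computation in
 * __hists__output_resort() below.
 */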

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}
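
/*
 * BITS_PER_LONG / 4 is the number of hex digits in a pointer-sized
 * address: 16 on a 64-bit host, 8 on a 32-bit one. Unresolved entries are
 * shown as raw hex addresses, so that is the minimum sensible width for
 * the DSO column, unless the user pinned column widths
 * (col_width_list_str), asked for separator-delimited output (field_sep),
 * or is filtering on specific DSOs anyway.
 */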

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
        else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                /*
                 * +4 accounts for the '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                /*
                 * +4 accounts for the '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period    += period;
        he_stat->weight    += weight;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period           += src->period;
        dest->period_sys       += src->period_sys;
        dest->period_us        += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us  += src->period_guest_us;
        dest->nr_events        += src->nr_events;
        dest->weight           += src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}
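
/*
 * Multiplying by 7/8 per decay tick is a cheap, integer-only exponential
 * decay: a period of 1000 becomes 875, then 765, then 669, and so on.
 * Because the division truncates, an entry that stops receiving samples
 * reaches exactly 0 after finitely many ticks and can then be reclaimed
 * by __hists__decay_entries() below. The half-life is ln(2) / ln(8/7),
 * about 5.2 ticks.
 */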

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here
                 * in case it gets new samples; we'll eventually free it
                 * when the user stops browsing and it again gets fully
                 * decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}
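
/*
 * Note the tail allocation above: he->callchain is declared as a
 * zero-length array at the end of struct hist_entry (see hist.h), so the
 * sizeof(struct callchain_root) of extra storage is only allocated, and
 * callchain_init() only run, when callchains were actually requested.
 */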

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period,
                                         u64 weight)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * If the map of an existing hist_entry has become
                         * out-of-date due to an exec() or similar, update it.
                         * Otherwise we will mis-adjust symbol addresses when
                         * computing the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}
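
/*
 * add_hist_entry() keeps hists->entries_in ordered by the configured sort
 * keys via hist_entry__cmp(). A sample whose keys compare equal to an
 * existing entry is folded into it by he_stat__add_period(); only a new
 * key combination allocates a hist_entry. With the default sort order of
 * comm, dso, symbol, for example, every sample hitting the same function
 * in the same binary from the same comm accumulates into one bucket. The
 * three __hists__add_*_entry() wrappers below just fill in the template
 * entry from different sample flavours (plain, branch, mem).
 */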

struct hist_entry *__hists__add_mem_entry(struct hists *self,
                                          struct addr_location *al,
                                          struct symbol *sym_parent,
                                          struct mem_info *mi,
                                          u64 period,
                                          u64 weight)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .stat = {
                        .period    = period,
                        .weight    = weight,
                        .nr_events = 1,
                },
                .cpu      = al->cpu,
                .ip       = al->addr,
                .level    = al->level,
                .parent   = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists    = self,
                .mem_info = mi,
                .branch_info = NULL,
        };
        return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period,
                                             u64 weight)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = bi->to.map,
                        .sym = bi->to.sym,
                },
                .cpu   = al->cpu,
                .ip    = bi->to.addr,
                .level = al->level,
                .stat = {
                        .period    = period,
                        .nr_events = 1,
                        .weight    = weight,
                },
                .parent   = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
                .hists    = self,
                .mem_info = NULL,
        };

        return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period,
                                      u64 weight)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu   = al->cpu,
                .ip    = al->addr,
                .level = al->level,
                .stat = {
                        .period    = period,
                        .nr_events = 1,
                        .weight    = weight,
                },
                .parent   = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists    = self,
                .branch_info = NULL,
                .mem_info = NULL,
        };

        return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
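
/*
 * "f = se->se_collapse ?: se->se_cmp" above uses the GNU "a ?: b"
 * shorthand for "a ? a : b": a sort key that supplies a dedicated
 * collapse comparator gets it, everything else merges using its display
 * comparator. (The comm key is one such user: comm strings can only be
 * compared reliably at collapse time, once all comm events are in.)
 */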

void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he->mem_info);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
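
/*
 * hists->entries_in double-buffers between entries_in_array[0] and [1]:
 * under the lock we take the tree that has been receiving samples and
 * point new insertions at the other one. The collector can thus keep
 * adding entries (add_hist_entry() above takes the same lock) while the
 * resort path consumes a stable snapshot, as in perf top's threaded mode.
 */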

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;

        /*
         * sizeof(*periods_a), not sizeof(periods_a): we need room for
         * nr_members u64 values, not nr_members pointers.
         */
        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}
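
/*
 * For event groups (symbol_conf.event_group), a tie on the leading
 * event's period is broken by gathering each group member's period from
 * the pair lists and comparing them index by index. The loop starts at
 * i = 1 because index 0 is the group leader, whose periods were already
 * compared by the first period_cmp() call.
 */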

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}
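
/*
 * h->filtered is a bitmask with one bit per enum hist_filter source.
 * Each hists__filter_by_*() pass below clears only its own bit, and the
 * entry is added back into the totals above only once every bit is
 * clear, so the dso, thread and symbol filters compose without knowing
 * about each other.
 */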

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}
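
/*
 * hists__match() and hists__link() together form the pairing machinery
 * used by consumers such as perf diff: match links entries whose sort
 * keys exist on both sides, and link backfills the leader with zeroed
 * dummy entries for keys that only the other hists has, so that both
 * sides can afterwards be walked in lockstep.
 */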