/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Refactored from builtin-top.c, see that file for further copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "symbol.h"
#include "top.h"
#include <inttypes.h>

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym, struct perf_top *top)
{
	double weight = sym->snap_count;
	int counter;

	if (!top->display_weighted)
		return weight;

	for (counter = 1; counter < top->evlist->nr_entries - 1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}
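
/* Unlink @syme from the active symbols list while holding the list lock. */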
static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme)
{
	pthread_mutex_lock(&top->active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&top->active_symbols_lock);
}
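
/*
 * Insert @se keyed on descending weight, so that an in-order walk
 * (rb_first()/rb_next()) visits the heaviest symbols first.
 */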
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}
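
/*
 * snprintf() returns the length it would have written, which can exceed the
 * buffer size on truncation; clamp the result so callers can keep advancing
 * through the buffer without overrunning it.
 */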
#define SNPRINTF(buf, size, fmt, args...) \
({ \
	size_t r = snprintf(buf, size, fmt, ## args); \
	r > size ? size : r; \
})
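
/*
 * Format the summary/header line (sample rates, event names, target and
 * monitored CPUs) into @bf.
 */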
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
{
	struct perf_evsel *counter;
	float samples_per_sec = top->samples / top->delay_secs;
	float ksamples_per_sec = top->kernel_samples / top->delay_secs;
	float esamples_percent = (100.0 * top->exact_samples) / top->samples;
	size_t ret = 0;

	if (!perf_guest) {
		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	} else {
		float us_samples_per_sec = top->us_samples / top->delay_secs;
		float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
		float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;

		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
			       " guest kernel:%4.1f%% guest us:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_kernel_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_us_samples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	}

	if (top->evlist->nr_entries == 1 || !top->display_weighted) {
		struct perf_evsel *first;

		first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
		ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
				(uint64_t)first->attr.sample_period,
				top->freq ? "Hz" : "");
	}

	if (!top->display_weighted) {
		ret += SNPRINTF(bf + ret, size - ret, "%s",
				event_name(top->sym_evsel));
	} else list_for_each_entry(counter, &top->evlist->entries, node) {
		ret += SNPRINTF(bf + ret, size - ret, "%s%s",
				counter->idx ? "/" : "", event_name(counter));
	}

	ret += SNPRINTF(bf + ret, size - ret, "], ");

	if (top->target_pid != -1)
		ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d",
				top->target_pid);
	else if (top->target_tid != -1)
		ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d",
				top->target_tid);
	else
		ret += SNPRINTF(bf + ret, size - ret, " (all");

	if (top->cpu_list)
		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
				top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
	else {
		if (top->target_tid != -1)
			ret += SNPRINTF(bf + ret, size - ret, ")");
		else
			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
					top->evlist->cpus->nr,
					top->evlist->cpus->nr > 1 ? "s" : "");
	}

	return ret;
}
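
/* Zero all of the per-category sample counters. */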
void perf_top__reset_sample_counters(struct perf_top *top)
{
	top->samples = top->us_samples = top->kernel_samples =
	top->exact_samples = top->guest_kernel_samples =
	top->guest_us_samples = 0;
}
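
/*
 * Take a snapshot of each active symbol's count, dropping symbols that have
 * no samples or are hidden, insert those that pass the count filter into
 * @root ordered by weight, and decay the raw counts by 7/8 (or zero them
 * when top->zero is set). Returns the sum of the snapshot counts.
 */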
float perf_top__decay_samples(struct perf_top *top, struct rb_root *root)
{
	struct sym_entry *syme, *n;
	float sum_ksamples = 0.0;
	int snap = !top->display_weighted ? top->sym_counter : 0, j;

	/* Sort the active symbols */
	pthread_mutex_lock(&top->active_symbols_lock);
	syme = list_entry(top->active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&top->active_symbols_lock);

	top->rb_entries = 0;
	list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((top->hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (top->hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				perf_top__remove_active_sym(top, syme);
				continue;
			}
			syme->weight = sym_weight(syme, top);

			if ((int)syme->snap_count >= top->count_filter) {
				rb_insert_active_sym(root, syme);
				++top->rb_entries;
			}
			sum_ksamples += syme->snap_count;

			for (j = 0; j < top->evlist->nr_entries; j++)
				syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8;
		} else
			perf_top__remove_active_sym(top, syme);
	}

	return sum_ksamples;
}

/*
 * Find the widths of the longest symbol and DSO names that will be displayed.
 */
void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
			   int *dso_width, int *dso_short_width, int *sym_width)
{
	struct rb_node *nd;
	int printed = 0;

	*sym_width = *dso_width = *dso_short_width = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
		struct symbol *sym = sym_entry__symbol(syme);

		if (++printed > top->print_entries ||
		    (int)syme->snap_count < top->count_filter)
			continue;

		if (syme->map->dso->long_name_len > *dso_width)
			*dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > *dso_short_width)
			*dso_short_width = syme->map->dso->short_name_len;

		if (sym->namelen > *sym_width)
			*sym_width = sym->namelen;
	}
}