/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/symbol.h"
#include "util/color.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static int		fd[MAX_NR_CPUS][MAX_COUNTERS];

static int		system_wide		=  0;

static int		default_interval	=  0;

static int		count_filter		=  5;
static int		print_entries;

static int		target_pid		= -1;
static int		inherit			=  0;
static int		profile_cpu		= -1;
static int		nr_cpus			=  0;
static unsigned int	realtime_prio		=  0;
static int		group			=  0;
static unsigned int	page_size;
static unsigned int	mmap_pages		= 16;
static int		freq			= 1000; /* 1 KHz */

static int		delay_secs		=  2;
static int		zero			=  0;
static int		dump_symtab		=  0;

static bool		hide_kernel_symbols	= false;
static bool		hide_user_symbols	= false;
static struct winsize	winsize;
const char		*vmlinux_name;

static const char	*graph_line =
	"_____________________________________________________________________"
	"_____________________________________________________________________";

static const char	*graph_dotted_line =
	"---------------------------------------------------------------------"
	"---------------------------------------------------------------------"
	"---------------------------------------------------------------------";

/*
 * Source
 */

struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static char		*sym_filter		= NULL;
struct sym_entry	*sym_filter_entry	= NULL;
static int		sym_pcnt_filter		= 5;
static int		sym_counter		= 0;
static int		display_weighted	= -1;

/*
 * Symbols
 */

struct sym_entry_source {
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		lock;
};

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		snap_count;
	double			weight;
	int			skip;
	u16			name_len;
	u8			origin;
	struct map		*map;
	struct sym_entry_source	*src;
	unsigned long		count[0];
};
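
/*
 * Note: count[] is a zero-length tail array with one slot per event counter;
 * the space for it is reserved by the symbol__init() call in cmd_top().
 * Each sym_entry lives in the private area in front of its struct symbol,
 * which is why sym_entry__symbol() below can recover the symbol simply by
 * skipping symbol__priv_size bytes, and symbol__priv() does the reverse.
 */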

/*
 * Source functions
 */

static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
	return ((void *)self) + symbol__priv_size;
}

static void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

static void update_print_entries(struct winsize *ws)
{
	print_entries = ws->ws_row;

	if (print_entries > 9)
		print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}
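
/*
 * parse_source() shells out to objdump for the selected symbol's address
 * range and keeps each line of the annotated disassembly in a singly
 * linked source_line list, so that later samples can be charged to
 * individual instructions by record_precise_ip().
 */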
static void parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return;

	if (syme->src == NULL) {
		syme->src = calloc(1, sizeof(*source));
		if (syme->src == NULL)
			return;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}

	sym = sym_entry__symbol(syme);
	map = syme->map;
	path = map->dso->long_name;

	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=0x%016Lx "
			"--stop-address=0x%016Lx -dS %s",
		map->unmap_ip(map, sym->start),
		map->unmap_ip(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		if (strlen(src->line)>8 && src->line[8] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
		if (strlen(src->line)>16 && src->line[16] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->src->lines;
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}
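
/*
 * record_precise_ip() charges a sample to the matching disassembly line of
 * the currently annotated symbol; it uses trylock so that the event-reading
 * path never blocks behind the display thread holding the source lock.
 */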
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src == NULL || syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATH_MAX];

	sprintf(pattern, "<%s>:", symbol->name);

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (strstr(line->line, pattern)) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}

static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT	3

static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}

/*
 * Symbols will be added here in event__process_sample and will get out
 * after decayed.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long			samples;
static long			userspace_samples;
static const char		CONSOLE_CLEAR[] = "\e[H\e[2J";

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}
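
/*
 * print_sym_table() snapshots the active symbols, sorts them into an rbtree
 * by weight, prints one screenful, and decays every counter (count * 7/8,
 * or zeroes it when 'z' is active) so that stale entries age out of the
 * display on subsequent refreshes.
 */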
static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0;
	const int win_width = winsize.ws_col - 1;
	struct dso *unique_dso = NULL, *first_dso = NULL;

	samples = userspace_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {
			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (first_dso == NULL)
			unique_dso = first_dso = syme->map->dso;
		else if (syme->map->dso != first_dso)
			unique_dso = NULL;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	if (unique_dso)
		printf("DSO: %s\n", unique_dso->long_name);
	else {
		int max_dso_width = winsize.ws_col - sym_width - 29;
		if (dso_width > max_dso_width)
			dso_width = max_dso_width;
		putchar('\n');
	}
	if (nr_counters == 1)
		printf(" samples pcnt");
	else
		printf(" weight samples pcnt");

	if (verbose)
		printf(" RIP ");
	printf(" %-*.*s", sym_width, sym_width, "function");
	if (!unique_dso)
		printf(" DSO");
	putchar('\n');
	printf(" %s _______ _____",
	       nr_counters == 1 ? " " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	if (!unique_dso)
		printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);

		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		if (!unique_dso)
			printf(" %-*.*s", dso_width, dso_width,
			       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
		printf("\n");
	}
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->src->lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->src->lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", sym_filter);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);

	if (vmlinux_name) {
		fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
		fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
		fprintf(stdout, "\t[S] stop annotation.\n");
	}

	if (nr_counters > 1)
		fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K] hide kernel_symbols symbols. \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
			return 1;
		case 'E':
		case 'w':
			return nr_counters > 1 ? 1 : 0;
		case 'F':
		case 's':
		case 'S':
			return vmlinux_name ? 1 : 0;
		default:
			break;
	}

	return 0;
}
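
/*
 * handle_keypress() implements the interactive hotkeys: for an unmapped key
 * it prints the key help, temporarily switches the terminal to non-canonical,
 * no-echo mode, waits for a selection and then dispatches on it.
 */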
static void handle_keypress(int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			if (print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (nr_counters > 1) {
				int i;

				fprintf(stderr, "\nAvailable events:");
				for (i = 0; i < nr_counters; i++)
					fprintf(stderr, "\n\t%d %s", i, event_name(i));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
					sym_counter = 0;
					sleep(1);
				}
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				dsos__fprintf(stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = ~zero;
			break;
		default:
			break;
	}
}

static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
		sym_filter_entry = syme;

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}
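
/*
 * event__process_sample() resolves a sample IP to a symbol: user samples go
 * through the owning thread's maps, kernel samples through the kernel maps,
 * and user addresses outside all known maps with a negative value fall
 * through to the kernel lookup to catch vsyscall/vdso hits.
 */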
static void event__process_sample(const event_t *self, int counter)
{
	u64 ip = self->ip.ip;
	struct map *map;
	struct sym_entry *syme;
	struct symbol *sym;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (origin) {
	case PERF_RECORD_MISC_USER: {
		struct thread *thread;

		if (hide_user_symbols)
			return;

		thread = threads__findnew(self->ip.pid);
		if (thread == NULL)
			return;

		map = thread__find_map(thread, ip);
		if (map != NULL) {
			ip = map->map_ip(map, ip);
			sym = map__find_symbol(map, ip, symbol_filter);
			if (sym == NULL)
				return;
			userspace_samples++;
			break;
		}
	}
		/*
		 * If this is outside of all known maps,
		 * and is a negative address, try to look it
		 * up in the kernel dso, as it might be a
		 * vsyscall or vdso (which executes in user-mode).
		 */
		if ((long long)ip >= 0)
			return;
		/* Fall thru */
	case PERF_RECORD_MISC_KERNEL:
		if (hide_kernel_symbols)
			return;

		sym = kernel_maps__find_symbol(ip, &map, symbol_filter);
		if (sym == NULL)
			return;
		break;
	default:
		return;
	}

	syme = symbol__priv(sym);

	if (!syme->skip) {
		syme->count[counter]++;
		syme->origin = origin;
		record_precise_ip(syme, counter, ip);

		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);

		++samples;
		return;
	}
}

static void event__process_mmap(event_t *self)
{
	struct thread *thread = threads__findnew(self->mmap.pid);

	if (thread != NULL) {
		struct map *map = map__new(&self->mmap, NULL, 0);
		if (map != NULL)
			thread__insert_map(thread, map);
	}
}

static void event__process_comm(event_t *self)
{
	struct thread *thread = threads__findnew(self->comm.pid);

	if (thread != NULL)
		thread__set_comm(thread, self->comm.comm);
}

static int event__process(event_t *event)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event);
		break;
	default:
		break;
	}

	return 0;
}

struct mmap_data {
	int			counter;
	void			*base;
	int			mask;
	unsigned int		prev;
};
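
/*
 * The mmap'ed ring buffer starts with a perf_event_mmap_page control page;
 * data_head is the kernel's write position and must be read before any of
 * the data it covers, hence the rmb() below.
 */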
static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}

static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, md->counter);
		else
			event__process(event);
		old += size;
	}

	md->prev = old;
}

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void mmap_read(void)
{
	int i, counter;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			mmap_read_counter(&mmap_array[i][counter]);
	}
}

int nr_poll;
int group_fd;
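
/*
 * start_counter() opens one perf event fd per (cpu, counter) pair (or per
 * counter when a single pid or cpu is being profiled), optionally grouping
 * the counters behind the first fd, and mmaps mmap_pages data pages plus
 * one control page per fd.
 */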
static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;

	attr = attrs + counter;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

	attr->inherit		= (cpu < 0) && inherit;
	attr->mmap		= 1;

try_again:
	fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM || err == EACCES)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		printf("\n");
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		exit(-1);
	}
	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
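
/*
 * __cmd_top() is the main loop: synthesize COMM/MMAP events for tasks that
 * are already running, open and mmap all counters, hand screen updates to a
 * separate display thread, then keep draining the ring buffers, polling only
 * when no new samples arrived.
 */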
static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;

	if (target_pid != -1)
		event__synthesize_thread(target_pid, event__process);
	else
		event__synthesize_threads(event__process);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	mmap_read();

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		mmap_read();

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate - requires -k option"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter, err;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	symbol__init(sizeof(struct sym_entry) +
		     (nr_counters + 1) * sizeof(unsigned long));

	if (delay_secs < 1)
		delay_secs = 1;

	err = kernel_maps__init(vmlinux_name, !vmlinux_name, true);
	if (err < 0)
		return err;
	parse_source(sym_filter_entry);

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	return __cmd_top();
}