/*
 * kerneltop.c: show top kernel functions - performance counters showcase
 *
 * Build with:
 *	make -C Documentation/perf_counter/
 *
 * Sample output:
 *
 * ------------------------------------------------------------------------------
 *  KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2)
 * ------------------------------------------------------------------------------
 *
 *  weight         RIP          kernel function
 *  ______   ________________   _______________
 *
 *   35.20 - ffffffff804ce74b : skb_copy_and_csum_dev
 *   33.00 - ffffffff804cb740 : sock_alloc_send_skb
 *   31.26 - ffffffff804ce808 : skb_push
 *   22.43 - ffffffff80510004 : tcp_established_options
 *   19.00 - ffffffff8027d250 : find_get_page
 *   15.76 - ffffffff804e4fc9 : eth_type_trans
 *   15.20 - ffffffff804d8baa : dst_release
 *   14.86 - ffffffff804cf5d8 : skb_release_head_state
 *   14.00 - ffffffff802217d5 : read_hpet
 *   12.00 - ffffffff804ffb7f : __ip_local_out
 *   11.97 - ffffffff804fc0c8 : ip_local_deliver_finish
 *    8.54 - ffffffff805001a3 : ip_queue_xmit
 */

/*
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util/util.h"

#include <getopt.h>
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

#include "../../include/linux/perf_counter.h"

/*
 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#define PR_TASK_PERF_COUNTERS_ENABLE	32

#define rdclock()					\
({							\
	struct timespec ts;				\
							\
	clock_gettime(CLOCK_MONOTONIC, &ts);		\
	ts.tv_sec * 1000000000ULL + ts.tv_nsec;		\
})

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#ifdef __x86_64__
#define __NR_perf_counter_open	298
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __i386__
#define __NR_perf_counter_open	336
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __powerpc__
#define __NR_perf_counter_open	319
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory");
#endif

#define unlikely(x)	__builtin_expect(!!(x), 0)
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })
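
/*
 * Thin wrapper around the perf_counter_open system call, invoked
 * directly via syscall() with the per-architecture
 * __NR_perf_counter_open numbers defined above.
 */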
asmlinkage int sys_perf_counter_open(
	struct perf_counter_hw_event *hw_event_uptr	__user,
	pid_t pid,
	int cpu,
	int group_fd,
	unsigned long flags)
{
	return syscall(
		__NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags);
}

#define MAX_COUNTERS	64
#define MAX_NR_CPUS	256

#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))

static int system_wide = 0;

static int nr_counters = 0;
static __u64 event_id[MAX_COUNTERS] = {
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),

	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
};
static int default_interval = 100000;
static int event_count[MAX_COUNTERS];
static int fd[MAX_NR_CPUS][MAX_COUNTERS];

static __u64 count_filter = 100;

static int tid = -1;
static int profile_cpu = -1;
static int nr_cpus = 0;
static int nmi = 1;
static unsigned int realtime_prio = 0;
static int group = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int use_mmap = 0;
static int use_munmap = 0;

static char *vmlinux;

static char *sym_filter;
static unsigned long filter_start;
static unsigned long filter_end;

static int delay_secs = 2;
static int zero;
static int dump_symtab;

static int scale;

struct source_line {
	uint64_t		EIP;
	unsigned long		count;
	char			*line;
	struct source_line	*next;
};

static struct source_line *lines;
static struct source_line **lines_tail;

static const unsigned int default_count[] = {
	1000000,
	1000000,
	  10000,
	  10000,
	1000000,
	  10000,
};

static char *hw_event_names[] = {
	"CPU cycles",
	"instructions",
	"cache references",
	"cache misses",
	"branches",
	"branch misses",
	"bus cycles",
};

static char *sw_event_names[] = {
	"cpu clock ticks",
	"task clock ticks",
	"pagefaults",
	"context switches",
	"CPU migrations",
	"minor faults",
	"major faults",
};

struct event_symbol {
	__u64	event;
	char	*symbol;
};

static struct event_symbol event_symbols[] = {
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),		"cpu-cycles",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),		"cycles",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),		"instructions",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),		"cache-references",	},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),		"cache-misses",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS),	"branch-instructions",	},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS),	"branches",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES),		"branch-misses",	},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES),		"bus-cycles",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK),			"cpu-clock",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),		"task-clock",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),		"page-faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),		"faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN),		"minor-faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ),		"major-faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),		"context-switches",	},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),		"cs",			},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),		"cpu-migrations",	},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),		"migrations",		},
};

#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)

static void display_events_help(void)
{
	unsigned int i;
	__u64 e;

	printf(
	" -e EVENT --event=EVENT # symbolic-name abbreviations");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		int type, id;

		e = event_symbols[i].event;
		type = PERF_COUNTER_TYPE(e);
		id = PERF_COUNTER_ID(e);

		printf("\n %d:%d: %-20s",
			type, id, event_symbols[i].symbol);
	}

	printf("\n"
	" rNNN: raw PMU events (eventsel+umask)\n\n");
}

static void display_help(void)
{
	printf(
	"Usage: kerneltop [<options>]\n"
	" Or: kerneltop -S [<options>] COMMAND [ARGS]\n\n"
	"KernelTop Options (up to %d event types can be specified at once):\n\n",
		MAX_COUNTERS);

	display_events_help();

	printf(
	" -c CNT --count=CNT # event period to sample\n\n"
	" -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n"
	" -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n"
	" -l # show scale factor for RR events\n"
	" -d delay --delay=<seconds> # sampling/display delay [default: 2]\n"
	" -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n"
	" -r prio --realtime=<prio> # event acquisition runs with SCHED_FIFO policy\n"
	" -s symbol --symbol=<symbol> # function to be shown annotated one-shot\n"
	" -x path --vmlinux=<path> # the vmlinux binary, required for -s use\n"
	" -z --zero # zero counts after display\n"
	" -D --dump_symtab # dump symbol table to stderr on startup\n"
	" -m pages --mmap_pages=<pages> # number of mmap data pages\n"
	" -M --mmap_info # print mmap info stream\n"
	" -U --munmap_info # print munmap info stream\n"
	);

	exit(0);
}
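
/*
 * Map a counter's config value back to a human-readable name, used
 * for the header line of the output.
 */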
static char *event_name(int ctr)
{
	__u64 config = event_id[ctr];
	int type = PERF_COUNTER_TYPE(config);
	int id = PERF_COUNTER_ID(config);
	static char buf[32];

	if (PERF_COUNTER_RAW(config)) {
		sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config));
		return buf;
	}

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (id < PERF_HW_EVENTS_MAX)
			return hw_event_names[id];
		return "unknown-hardware";

	case PERF_TYPE_SOFTWARE:
		if (id < PERF_SW_EVENTS_MAX)
			return sw_event_names[id];
		return "unknown-software";

	default:
		break;
	}

	return "unknown";
}

/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static __u64 match_event_symbols(char *str)
{
	__u64 config, id;
	int type;
	unsigned int i;

	if (sscanf(str, "r%llx", &config) == 1)
		return config | PERF_COUNTER_RAW_MASK;

	if (sscanf(str, "%d:%llu", &type, &id) == 2)
		return EID(type, id);

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		if (!strncmp(str, event_symbols[i].symbol,
			     strlen(event_symbols[i].symbol)))
			return event_symbols[i].event;
	}

	return ~0ULL;
}
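
/*
 * Parse a comma-separated list of event specifiers (the -e option)
 * and append each one to event_id[], up to MAX_COUNTERS entries.
 */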
static int parse_events(char *str)
{
	__u64 config;

again:
	if (nr_counters == MAX_COUNTERS)
		return -1;

	config = match_event_symbols(str);
	if (config == ~0ULL)
		return -1;

	event_id[nr_counters] = config;
	nr_counters++;

	str = strstr(str, ",");
	if (str) {
		str++;
		goto again;
	}

	return 0;
}

/*
 * Symbols
 */

static uint64_t min_ip;
static uint64_t max_ip = -1ll;

struct sym_entry {
	unsigned long long	addr;
	char			*sym;
	unsigned long		count[MAX_COUNTERS];
	int			skip;
	struct source_line	*source;
};

#define MAX_SYMS	100000

static int sym_table_count;

struct sym_entry *sym_filter_entry;

static struct sym_entry sym_table[MAX_SYMS];

static void show_details(struct sym_entry *sym);

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight;
	int counter;

	weight = sym->count[0];

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static int compare(const void *__sym1, const void *__sym2)
{
	const struct sym_entry *sym1 = __sym1, *sym2 = __sym2;

	return sym_weight(sym1) < sym_weight(sym2);
}

static long events;
static long userspace_events;
static const char CONSOLE_CLEAR[] = "\033[H\033[2J";

static struct sym_entry tmp[MAX_SYMS];
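
/*
 * Print one refresh of the top-style output: a header with the event
 * rate and counter names, then the hottest kernel functions sorted by
 * sym_weight(). Counts are decayed (or zeroed with -z) after each
 * refresh, and a pending keypress on stdin exits the tool.
 */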
static void print_sym_table(void)
{
	int i, printed;
	int counter;
	float events_per_sec = events/delay_secs;
	float kevents_per_sec = (events-userspace_events)/delay_secs;
	float sum_kevents = 0.0;

	events = userspace_events = 0;
	memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count);
	qsort(tmp, sym_table_count, sizeof(tmp[0]), compare);

	for (i = 0; i < sym_table_count && tmp[i].count[0]; i++)
		sum_kevents += tmp[i].count[0];

	write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR));

	printf(
"------------------------------------------------------------------------------\n");
	printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [%s, ",
		events_per_sec,
		100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)),
		nmi ? "NMI" : "IRQ");

	if (nr_counters == 1)
		printf("%d ", event_count[0]);

	for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (tid != -1)
		printf(" (tid: %d", tid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (tid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("------------------------------------------------------------------------------\n\n");

	if (nr_counters == 1)
		printf(" events pcnt");
	else
		printf(" weight events pcnt");

	printf(" RIP kernel function\n"
	       " ______ ______ _____ ________________ _______________\n\n"
	);

	for (i = 0, printed = 0; i < sym_table_count; i++) {
		float pcnt;
		int count;

		if (printed <= 18 && tmp[i].count[0] >= count_filter) {
			pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents));

			if (nr_counters == 1)
				printf("%19.2f - %4.1f%% - %016llx : %s\n",
					sym_weight(tmp + i),
					pcnt, tmp[i].addr, tmp[i].sym);
			else
				printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n",
					sym_weight(tmp + i),
					tmp[i].count[0],
					pcnt, tmp[i].addr, tmp[i].sym);
			printed++;
		}
		/*
		 * Add decay to the counts:
		 */
		for (count = 0; count < nr_counters; count++)
			sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8;
	}

	if (sym_filter_entry)
		show_details(sym_filter_entry);

	{
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };

		if (poll(&stdin_poll, 1, 0) == 1) {
			printf("key pressed - exiting.\n");
			exit(0);
		}
	}
}

static void *display_thread(void *arg)
{
	printf("KernelTop refresh period: %d seconds\n", delay_secs);

	while (!sleep(delay_secs))
		print_sym_table();

	return NULL;
}
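
/*
 * Parse one line of /proc/kallsyms into a sym_entry: address, symbol
 * type and name. Returns 1 for symbols that should be ignored
 * (non-text symbols, known duplicates, module init/cleanup stubs),
 * -1 on EOF, and 0 otherwise. Also tracks the address range of the
 * -s / --symbol filter symbol.
 */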
static int read_symbol(FILE *in, struct sym_entry *s)
{
	static int filter_match = 0;
	char *sym, stype;
	char str[500];
	int rc, pos;

	rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str);
	if (rc == EOF)
		return -1;

	assert(rc == 3);

	/* skip until end of line: */
	pos = strlen(str);
	do {
		rc = fgetc(in);
		if (rc == '\n' || rc == EOF || pos >= 499)
			break;
		str[pos] = rc;
		pos++;
	} while (1);
	str[pos] = 0;

	sym = str;

	/* Filter out known duplicates and non-text symbols. */
	if (!strcmp(sym, "_text"))
		return 1;
	if (!min_ip && !strcmp(sym, "_stext"))
		return 1;
	if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext"))
		return 1;
	if (stype != 'T' && stype != 't')
		return 1;
	if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14))
		return 1;
	if (strstr(sym, "_text_start") || strstr(sym, "_text_end"))
		return 1;

	s->sym = malloc(strlen(str) + 1);
	assert(s->sym);

	strcpy((char *)s->sym, str);
	s->skip = 0;

	/* Tag events to be skipped. */
	if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym))
		s->skip = 1;
	else if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym))
		s->skip = 1;
	else if (!strcmp("mwait_idle", s->sym))
		s->skip = 1;

	if (filter_match == 1) {
		filter_end = s->addr;
		filter_match = -1;
		if (filter_end - filter_start > 10000) {
			printf("hm, too large filter symbol <%s> - skipping.\n",
				sym_filter);
			printf("symbol filter start: %016lx\n", filter_start);
			printf("                end: %016lx\n", filter_end);
			filter_end = filter_start = 0;
			sym_filter = NULL;
			sleep(1);
		}
	}
	if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) {
		filter_match = 1;
		filter_start = s->addr;
	}

	return 0;
}

static int compare_addr(const void *__sym1, const void *__sym2)
{
	const struct sym_entry *sym1 = __sym1, *sym2 = __sym2;

	return sym1->addr > sym2->addr;
}

static void sort_symbol_table(void)
{
	int i, dups;

	do {
		qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr);
		for (i = 0, dups = 0; i < sym_table_count; i++) {
			if (sym_table[i].addr == sym_table[i+1].addr) {
				sym_table[i+1].addr = -1ll;
				dups++;
			}
		}
		sym_table_count -= dups;
	} while(dups);
}
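
/*
 * Load the kernel symbol table from /proc/kallsyms, sort it by
 * address, drop duplicates, and record the lowest/highest text
 * addresses (min_ip/max_ip) used later to classify samples as
 * kernel hits.
 */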
static void parse_symbols(void)
{
	struct sym_entry *last;

	FILE *kallsyms = fopen("/proc/kallsyms", "r");

	if (!kallsyms) {
		printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n");
		exit(-1);
	}

	while (!feof(kallsyms)) {
		if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) {
			sym_table_count++;
			assert(sym_table_count <= MAX_SYMS);
		}
	}

	sort_symbol_table();
	min_ip = sym_table[0].addr;
	max_ip = sym_table[sym_table_count-1].addr;
	last = sym_table + sym_table_count++;

	last->addr = -1ll;
	last->sym = "<end>";

	if (filter_end) {
		int count;
		for (count = 0; count < sym_table_count; count++) {
			if (!strcmp(sym_table[count].sym, sym_filter)) {
				sym_filter_entry = &sym_table[count];
				break;
			}
		}
	}
	if (dump_symtab) {
		int i;

		for (i = 0; i < sym_table_count; i++)
			fprintf(stderr, "%llx %s\n",
				sym_table[i].addr, sym_table[i].sym);
	}
}

/*
 * Source lines
 */
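
/*
 * Run objdump -dS over the filter symbol's address range in the given
 * vmlinux image and keep the annotated source/assembly lines on the
 * "lines" list, recording the address each line starts at.
 */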
static void parse_vmlinux(char *filename)
{
	FILE *file;
	char command[PATH_MAX*2];

	if (!filename)
		return;

	sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename);

	file = popen(command, "r");
	if (!file)
		return;

	lines_tail = &lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*lines_tail = src;
		lines_tail = &src->next;

		if (strlen(src->line) > 8 && src->line[8] == ':')
			src->EIP = strtoull(src->line, NULL, 16);
		if (strlen(src->line) > 16 && src->line[16] == ':')
			src->EIP = strtoull(src->line, NULL, 16);
	}
	pclose(file);
}
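
/*
 * Credit an exact sample address to the matching annotated source
 * line, for the -s / --symbol detail view.
 */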
static void record_precise_ip(uint64_t ip)
{
	struct source_line *line;

	for (line = lines; line; line = line->next) {
		if (line->EIP == ip)
			line->count++;
		if (line->EIP > ip)
			break;
	}
}

static void lookup_sym_in_vmlinux(struct sym_entry *sym)
{
	struct source_line *line;
	char pattern[PATH_MAX];
	sprintf(pattern, "<%s>:", sym->sym);

	for (line = lines; line; line = line->next) {
		if (strstr(line->line, pattern)) {
			sym->source = line;
			break;
		}
	}
}

static void show_lines(struct source_line *line_queue, int line_queue_count)
{
	int i;
	struct source_line *line;

	line = line_queue;
	for (i = 0; i < line_queue_count; i++) {
		printf("%8li\t%s\n", line->count, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT	3
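
/*
 * Print the annotated listing of the filtered symbol: lines whose hit
 * count reaches count_filter are printed together with a few queued
 * lines of preceding context (at most TRACE_COUNT), and each line's
 * count is reset after it has been considered.
 */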
static void show_details(struct sym_entry *sym)
{
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0;

	if (!sym->source)
		lookup_sym_in_vmlinux(sym);
	if (!sym->source)
		return;

	printf("Showing details for %s\n", sym->sym);

	line = sym->source;
	while (line) {
		if (displayed && strstr(line->line, ">:"))
			break;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count >= count_filter) {
			show_lines(line_queue, line_queue_count);
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count = 0;
		displayed++;
		if (displayed > 300)
			break;
		line = line->next;
	}
}

/*
 * Binary search in the histogram table and record the hit:
 */
static void record_ip(uint64_t ip, int counter)
{
	int left_idx, middle_idx, right_idx, idx;
	unsigned long left, middle, right;

	record_precise_ip(ip);

	left_idx = 0;
	right_idx = sym_table_count-1;
	assert(ip <= max_ip && ip >= min_ip);

	while (left_idx + 1 < right_idx) {
		middle_idx = (left_idx + right_idx) / 2;

		left   = sym_table[  left_idx].addr;
		middle = sym_table[middle_idx].addr;
		right  = sym_table[ right_idx].addr;

		if (!(left <= middle && middle <= right)) {
			printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right);
			printf("%d %d %d\n", left_idx, middle_idx, right_idx);
		}
		assert(left <= middle && middle <= right);
		if (!(left <= ip && ip <= right)) {
			printf(" left: %016lx\n", left);
			printf("   ip: %016lx\n", (unsigned long)ip);
			printf("right: %016lx\n", right);
		}
		assert(left <= ip && ip <= right);

		/*
		 * [ left .... target .... middle .... right ]
		 *   => right := middle
		 */
		if (ip < middle) {
			right_idx = middle_idx;
			continue;
		}
		/*
		 * [ left .... middle ... target ... right ]
		 *   => left := middle
		 */
		left_idx = middle_idx;
	}

	idx = left_idx;

	if (!sym_table[idx].skip)
		sym_table[idx].count[counter]++;
	else events--;
}
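
/*
 * Classify one sampled instruction pointer: anything outside the
 * kernel text range [min_ip, max_ip] is counted as a userspace
 * event, kernel hits are attributed to a symbol via record_ip().
 */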
static void process_event(uint64_t ip, int counter)
{
	events++;

	if (ip < min_ip || ip > max_ip) {
		userspace_events++;
		return;
	}

	record_ip(ip, counter);
}
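
/*
 * Command-line handling: see display_help() for the meaning of each
 * option. Falls back to a single default counter (event_id 0) and
 * the default sampling period when none are given.
 */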
static void process_options(int argc, char **argv)
{
	int error = 0, counter;

	for (;;) {
		int option_index = 0;
		/** Options for getopt */
		static struct option long_options[] = {
			{"count",	required_argument,	NULL, 'c'},
			{"cpu",		required_argument,	NULL, 'C'},
			{"delay",	required_argument,	NULL, 'd'},
			{"dump_symtab",	no_argument,		NULL, 'D'},
			{"event",	required_argument,	NULL, 'e'},
			{"filter",	required_argument,	NULL, 'f'},
			{"group",	required_argument,	NULL, 'g'},
			{"help",	no_argument,		NULL, 'h'},
			{"nmi",		required_argument,	NULL, 'n'},
			{"mmap_info",	no_argument,		NULL, 'M'},
			{"mmap_pages",	required_argument,	NULL, 'm'},
			{"munmap_info",	no_argument,		NULL, 'U'},
			{"pid",		required_argument,	NULL, 'p'},
			{"realtime",	required_argument,	NULL, 'r'},
			{"scale",	no_argument,		NULL, 'l'},
			{"symbol",	required_argument,	NULL, 's'},
			{"stat",	no_argument,		NULL, 'S'},
			{"vmlinux",	required_argument,	NULL, 'x'},
			{"zero",	no_argument,		NULL, 'z'},
			{NULL,		0,			NULL,  0 }
		};
		int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU",
				    long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'a': system_wide = 1; break;
		case 'c': default_interval = atoi(optarg); break;
		case 'C':
			/* CPU and PID are mutually exclusive */
			if (tid != -1) {
				printf("WARNING: CPU switch overriding PID\n");
				sleep(1);
				tid = -1;
			}
			profile_cpu = atoi(optarg); break;
		case 'd': delay_secs = atoi(optarg); break;
		case 'D': dump_symtab = 1; break;
		case 'e': error = parse_events(optarg); break;
		case 'f': count_filter = atoi(optarg); break;
		case 'g': group = atoi(optarg); break;
		case 'h': display_help(); break;
		case 'l': scale = 1; break;
		case 'n': nmi = atoi(optarg); break;
		case 'p':
			/* CPU and PID are mutually exclusive */
			if (profile_cpu != -1) {
				printf("WARNING: PID switch overriding CPU\n");
				sleep(1);
				profile_cpu = -1;
			}
			tid = atoi(optarg); break;
		case 'r': realtime_prio = atoi(optarg); break;
		case 's': sym_filter = strdup(optarg); break;
		case 'x': vmlinux = strdup(optarg); break;
		case 'z': zero = 1; break;
		case 'm': mmap_pages = atoi(optarg); break;
		case 'M': use_mmap = 1; break;
		case 'U': use_munmap = 1; break;
		default: error = 1; break;
		}
	}
	if (error)
		display_help();

	if (!nr_counters) {
		nr_counters = 1;
		event_id[0] = 0;
	}

	for (counter = 0; counter < nr_counters; counter++) {
		if (event_count[counter])
			continue;

		event_count[counter] = default_interval;
	}
}

struct mmap_data {
	int		counter;
	void		*base;
	unsigned int	mask;
	unsigned int	prev;
};
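
/*
 * Read the kernel's current write position in the mmap'ed ring
 * buffer; the rmb() keeps later data reads from being reordered
 * before the data_head load.
 */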
static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_counter_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}

struct timeval last_read, this_read;
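
/*
 * Drain all new records from one counter's ring buffer: handle
 * records that straddle the buffer edge, feed overflow (sample)
 * records into process_event(), and optionally print mmap/munmap
 * records.
 */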
static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and screw up the events under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				" Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	for (; old != head;) {
		struct ip_event {
			struct perf_event_header header;
			__u64 ip;
			__u32 pid, tid;
		};
		struct mmap_event {
			struct perf_event_header header;
			__u32 pid, tid;
			__u64 start;
			__u64 len;
			__u64 pgoff;
			char filename[PATH_MAX];
		};

		typedef union event_union {
			struct perf_event_header header;
			struct ip_event ip;
			struct mmap_event mmap;
		} event_t;

		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
			if (event->header.type & PERF_RECORD_IP)
				process_event(event->ip.ip, md->counter);
		} else {
			switch (event->header.type) {
			case PERF_EVENT_MMAP:
			case PERF_EVENT_MUNMAP:
				printf("%s: %Lu %Lu %Lu %s\n",
					event->header.type == PERF_EVENT_MMAP
						? "mmap" : "munmap",
					event->mmap.start,
					event->mmap.len,
					event->mmap.pgoff,
					event->mmap.filename);
				break;
			}
		}
	}

	md->prev = old;
}
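
/*
 * Entry point: open one counter per event per monitored CPU, mmap
 * their ring buffers, start the display thread, then loop reading
 * samples and polling the counter fds when the buffers are idle.
 */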
int cmd_top(int argc, char **argv, const char *prefix)
{
	struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
	struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
	struct perf_counter_hw_event hw_event;
	pthread_t thread;
	int i, counter, group_fd, nr_poll = 0;
	unsigned int cpu;
	int ret;

	page_size = sysconf(_SC_PAGE_SIZE);

	process_options(argc, argv);

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	if (tid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	parse_symbols();
	if (vmlinux && sym_filter_entry)
		parse_vmlinux(vmlinux);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++) {

			cpu = profile_cpu;
			if (tid == -1 && profile_cpu == -1)
				cpu = i;

			memset(&hw_event, 0, sizeof(hw_event));
			hw_event.config		= event_id[counter];
			hw_event.irq_period	= event_count[counter];
			hw_event.record_type	= PERF_RECORD_IP | PERF_RECORD_TID;
			hw_event.nmi		= nmi;
			hw_event.mmap		= use_mmap;
			hw_event.munmap		= use_munmap;

			fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0);
			if (fd[i][counter] < 0) {
				int err = errno;

				printf("kerneltop error: syscall returned with %d (%s)\n",
					fd[i][counter], strerror(err));
				if (err == EPERM)
					printf("Are you root?\n");
				exit(-1);
			}
			assert(fd[i][counter] >= 0);
			fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

			/*
			 * First counter acts as the group leader:
			 */
			if (group && group_fd == -1)
				group_fd = fd[i][counter];

			event_array[nr_poll].fd = fd[i][counter];
			event_array[nr_poll].events = POLLIN;
			nr_poll++;

			mmap_array[i][counter].counter = counter;
			mmap_array[i][counter].prev = 0;
			mmap_array[i][counter].mask = mmap_pages*page_size - 1;
			mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
					PROT_READ, MAP_SHARED, fd[i][counter], 0);
			if (mmap_array[i][counter].base == MAP_FAILED) {
				printf("kerneltop error: failed to mmap with %d (%s)\n",
					errno, strerror(errno));
				exit(-1);
			}
		}
	}

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = events;

		for (i = 0; i < nr_cpus; i++) {
			for (counter = 0; counter < nr_counters; counter++)
				mmap_read(&mmap_array[i][counter]);
		}

		if (hits == events)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}