/*
 * kerneltop.c: show top kernel functions - performance counters showcase

   Build with:

     cc -O6 -Wall -lrt `pkg-config --cflags --libs glib-2.0` -o kerneltop kerneltop.c

   Sample output:

------------------------------------------------------------------------------
 KernelTop:    2669 irqs/sec  [NMI, cache-misses/cache-refs],  (all, cpu: 2)
------------------------------------------------------------------------------

             weight         RIP          kernel function
             ______   ________________   _______________

              35.20 - ffffffff804ce74b : skb_copy_and_csum_dev
              33.00 - ffffffff804cb740 : sock_alloc_send_skb
              31.26 - ffffffff804ce808 : skb_push
              22.43 - ffffffff80510004 : tcp_established_options
              19.00 - ffffffff8027d250 : find_get_page
              15.76 - ffffffff804e4fc9 : eth_type_trans
              15.20 - ffffffff804d8baa : dst_release
              14.86 - ffffffff804cf5d8 : skb_release_head_state
              14.00 - ffffffff802217d5 : read_hpet
              12.00 - ffffffff804ffb7f : __ip_local_out
              11.97 - ffffffff804fc0c8 : ip_local_deliver_finish
               8.54 - ffffffff805001a3 : ip_queue_xmit
 */

/*
 * perfstat:  /usr/bin/time -alike performance counter statistics utility

          It summarizes the counter events of all tasks (and child tasks),
          covering all CPUs that the command (or workload) executes on.
          It only counts the per-task events of the workload started,
          independent of how many other tasks run on those CPUs.

   Sample output:

   $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null

   Performance counter stats for 'ls':

           163516953 instructions
                2295 cache-misses
             2855182 branch-misses
 */

/*
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>	/* PATH_MAX, used for the objdump command buffers below */
#include <getopt.h>
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>

#include <glib.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>

#include <linux/unistd.h>

#include "include/linux/perf_counter.h"

/*
 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_COUNTERS_DISABLE   31
#define PR_TASK_PERF_COUNTERS_ENABLE    32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define rdclock()                                       \
({                                                      \
        struct timespec ts;                             \
                                                        \
        clock_gettime(CLOCK_MONOTONIC, &ts);            \
        ts.tv_sec * 1000000000ULL + ts.tv_nsec;         \
})

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

typedef unsigned int            __u32;
typedef unsigned long long      __u64;
typedef long long               __s64;

#ifdef __x86_64__
# define __NR_perf_counter_open 295
#endif
#ifdef __i386__
# define __NR_perf_counter_open 333
#endif
#ifdef __powerpc__
#define __NR_perf_counter_open 319
#endif

asmlinkage int sys_perf_counter_open(
        struct perf_counter_hw_event    *hw_event_uptr  __user,
        pid_t                           pid,
        int                             cpu,
        int                             group_fd,
        unsigned long                   flags)
{
        int ret;

        ret = syscall(
                __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags);
#if defined(__x86_64__) || defined(__i386__)
        if (ret < 0 && ret > -4096) {
                errno = -ret;
                ret = -1;
        }
#endif
        return ret;
}

#define MAX_COUNTERS    64
#define MAX_NR_CPUS     256

#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))

static int      run_perfstat    = 0;
static int      system_wide     = 0;

static int      nr_counters     = 0;

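/*
 * Default event set: four software events followed by four hardware
 * events. perfstat counts all eight of these when no -e option is given;
 * kerneltop defaults to a single counter (see process_options()).
 */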
static __u64    event_id[MAX_COUNTERS]  = {
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),

        EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
        EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
        EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
        EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
};

static int              default_interval = 100000;

static int              event_count[MAX_COUNTERS];
static int              fd[MAX_NR_CPUS][MAX_COUNTERS];

static __u64            count_filter = 100;

static int              tid = -1;
static int              profile_cpu = -1;
static int              nr_cpus = 0;
static int              nmi = 1;
static int              group = 0;

static char             *vmlinux;
static char             *sym_filter;
static unsigned long    filter_start;
static unsigned long    filter_end;

static int              delay_secs = 2;
static int              zero;
static int              dump_symtab;

static GList            *lines;

struct source_line {
        uint64_t        EIP;
        unsigned long   count;
        char            *line;
};

const unsigned int default_count[] = {
        10000,
        1000000,
        10000,
        10000,
        1000000,
        10000,
};

static char *hw_event_names[] = {
        "CPU cycles",
        "instructions",
        "cache references",
        "cache misses",
        "branches",
        "branch misses",
        "bus cycles",
};

static char *sw_event_names[] = {
        "cpu clock ticks",
        "task clock ticks",
        "pagefaults",
        "context switches",
        "CPU migrations",
        "minor faults",
        "major faults",
};

struct event_symbol {
        __u64   event;
        char    *symbol;
};

static struct event_symbol event_symbols[] = {
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),                "cpu-cycles",           },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),                "cycles",               },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),              "instructions",         },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),          "cache-references",     },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),              "cache-misses",         },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS),       "branch-instructions",  },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS),       "branches",             },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES),             "branch-misses",        },
        {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES),                "bus-cycles",           },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK),                 "cpu-clock",            },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),                "task-clock",           },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),               "page-faults",          },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),               "faults",               },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN),           "minor-faults",         },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ),           "major-faults",         },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),          "context-switches",     },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),          "cs",                   },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),            "cpu-migrations",       },
        {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),            "migrations",           },
};

#define __PERF_COUNTER_FIELD(config, name) \
        ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)        __PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)     __PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)       __PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)         __PERF_COUNTER_FIELD(config, EVENT)

static void display_events_help(void)
{
        unsigned int i;
        __u64 e;

        printf(
        " -e EVENT     --event=EVENT        #  symbolic-name abbreviations");

        for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
                int type, id;

                e = event_symbols[i].event;
                type = PERF_COUNTER_TYPE(e);
                id = PERF_COUNTER_ID(e);

                printf("\n                             %d:%d: %-20s",
                        type, id, event_symbols[i].symbol);
        }

        printf("\n"
        "                           rNNN: raw PMU events (eventsel+umask)\n\n");
}

static void display_perfstat_help(void)
{
        printf(
        "Usage: perfstat [<events...>] <cmd...>\n\n"
        "PerfStat Options (up to %d event types can be specified):\n\n",
                MAX_COUNTERS);

        display_events_help();

        printf(
        " -a                           # system-wide collection\n");
        exit(0);
}

static void display_help(void)
{
        if (run_perfstat)
                return display_perfstat_help();

        printf(
        "Usage: kerneltop [<options>]\n"
        "   Or: kerneltop -S [<options>] COMMAND [ARGS]\n\n"
        "KernelTop Options (up to %d event types can be specified at once):\n\n",
                MAX_COUNTERS);

        display_events_help();

        printf(
        " -S        --stat             # perfstat COMMAND\n"
        " -a                           # system-wide collection (for perfstat)\n\n"
        " -c CNT    --count=CNT        # event period to sample\n\n"
        " -C CPU    --cpu=CPU          # CPU (-1 for all) [default: -1]\n"
        " -p PID    --pid=PID          # PID of sampled task (-1 for all) [default: -1]\n\n"
        " -d delay  --delay=<seconds>  # sampling/display delay [default: 2]\n"
        " -f CNT    --filter=CNT       # min-event-count filter [default: 100]\n\n"
        " -s symbol --symbol=<symbol>  # function to be shown annotated one-shot\n"
        " -x path   --vmlinux=<path>   # the vmlinux binary, required for -s use\n"
        " -z        --zero             # zero counts after display\n"
        " -D        --dump_symtab      # dump symbol table to stderr on startup\n"
        );

        exit(0);
}

static char *event_name(int ctr)
{
        __u64 config = event_id[ctr];
        int type = PERF_COUNTER_TYPE(config);
        int id = PERF_COUNTER_ID(config);
        static char buf[32];

        if (PERF_COUNTER_RAW(config)) {
                sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config));
                return buf;
        }

        switch (type) {
        case PERF_TYPE_HARDWARE:
                if (id < PERF_HW_EVENTS_MAX)
                        return hw_event_names[id];
                return "unknown-hardware";

        case PERF_TYPE_SOFTWARE:
                if (id < PERF_SW_EVENTS_MAX)
                        return sw_event_names[id];
                return "unknown-software";

        default:
                break;
        }

        return "unknown";
}

/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
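/*
 * Accepted -e forms, matched below and split on ',' by parse_events():
 *
 *   -e cycles,instructions      symbolic names from event_symbols[]
 *   -e <type>:<id>              explicit numeric type:id pair
 *   -e rNNN                     raw PMU event (eventsel+umask)
 */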
static __u64 match_event_symbols(char *str)
{
        __u64 config, id;
        int type;
        unsigned int i;

        if (sscanf(str, "r%llx", &config) == 1)
                return config | PERF_COUNTER_RAW_MASK;

        if (sscanf(str, "%d:%llu", &type, &id) == 2)
                return EID(type, id);

        for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
                if (!strncmp(str, event_symbols[i].symbol,
                             strlen(event_symbols[i].symbol)))
                        return event_symbols[i].event;
        }

        return ~0ULL;
}

static int parse_events(char *str)
{
        __u64 config;

again:
        if (nr_counters == MAX_COUNTERS)
                return -1;

        config = match_event_symbols(str);
        if (config == ~0ULL)
                return -1;

        event_id[nr_counters] = config;
        nr_counters++;

        str = strstr(str, ",");
        if (str) {
                str++;
                goto again;
        }

        return 0;
}

/*
 * perfstat
 */

char fault_here[1000000];

static void create_perfstat_counter(int counter)
{
        struct perf_counter_hw_event hw_event;

        memset(&hw_event, 0, sizeof(hw_event));
        hw_event.config         = event_id[counter];
        hw_event.record_type    = PERF_RECORD_SIMPLE;
        hw_event.nmi            = 0;

        if (system_wide) {
                int cpu;

                for (cpu = 0; cpu < nr_cpus; cpu++) {
                        fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0);
                        if (fd[cpu][counter] < 0) {
                                printf("perfstat error: syscall returned with %d (%s)\n",
                                       fd[cpu][counter], strerror(errno));
                                exit(-1);
                        }
                }
        } else {
                hw_event.inherit        = 1;
                hw_event.disabled       = 1;

                fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
                if (fd[0][counter] < 0) {
                        printf("perfstat error: syscall returned with %d (%s)\n",
                               fd[0][counter], strerror(errno));
                        exit(-1);
                }
        }
}

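/*
 * perfstat mode: open one counter per event (per CPU with -a), enable
 * counting via prctl(), fork+exec the given command, wait for it to
 * finish, then read back and print the per-counter totals (summed over
 * all CPUs) together with the elapsed wall-clock time.
 */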
int do_perfstat(int argc, char *argv[])
{
        unsigned long long t0, t1;
        int counter;
        ssize_t res;
        int status;
        int pid;

        if (!system_wide)
                nr_cpus = 1;

        for (counter = 0; counter < nr_counters; counter++)
                create_perfstat_counter(counter);

        argc -= optind;
        argv += optind;

        if (!argc)
                display_help();

        /*
         * Enable counters and exec the command:
         */
        t0 = rdclock();
        prctl(PR_TASK_PERF_COUNTERS_ENABLE);

        if ((pid = fork()) < 0)
                perror("failed to fork");
        if (!pid) {
                if (execvp(argv[0], argv)) {
                        perror(argv[0]);
                        exit(-1);
                }
        }
        while (wait(&status) >= 0)
                ;
        prctl(PR_TASK_PERF_COUNTERS_DISABLE);
        t1 = rdclock();

        fflush(stdout);

        fprintf(stderr, "\n");
        fprintf(stderr, " Performance counter stats for \'%s\':\n",
                argv[0]);
        fprintf(stderr, "\n");

        for (counter = 0; counter < nr_counters; counter++) {
                int cpu;
                __u64 count, single_count;

                count = 0;
                for (cpu = 0; cpu < nr_cpus; cpu++) {
                        res = read(fd[cpu][counter],
                                   (char *) &single_count, sizeof(single_count));
                        assert(res == sizeof(single_count));
                        count += single_count;
                }

                if (!PERF_COUNTER_RAW(event_id[counter]) &&
                    (event_id[counter] == PERF_COUNT_CPU_CLOCK ||
                     event_id[counter] == PERF_COUNT_TASK_CLOCK)) {

                        double msecs = (double)count / 1000000;

                        fprintf(stderr, " %14.6f %-20s (msecs)\n",
                                msecs, event_name(counter));
                } else {
                        fprintf(stderr, " %14Ld %-20s (events)\n",
                                count, event_name(counter));
                }
                if (!counter)
                        fprintf(stderr, "\n");
        }
        fprintf(stderr, "\n");
        fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n",
                (double)(t1-t0)/1e6);
        fprintf(stderr, "\n");

        return 0;
}

/*
 * Symbols
 */

static uint64_t                 min_ip;
static uint64_t                 max_ip = -1ll;

struct sym_entry {
        unsigned long long      addr;
        char                    *sym;
        unsigned long           count[MAX_COUNTERS];
        int                     skip;
        GList                   *source;
};

#define MAX_SYMS                100000

static int sym_table_count;

struct sym_entry                *sym_filter_entry;

static struct sym_entry         sym_table[MAX_SYMS];

static void show_details(struct sym_entry *sym);

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
        double weight;
        int counter;

        weight = sym->count[0];

        for (counter = 1; counter < nr_counters-1; counter++)
                weight *= sym->count[counter];

        weight /= (sym->count[counter] + 1);

        return weight;
}

static int compare(const void *__sym1, const void *__sym2)
{
        const struct sym_entry *sym1 = __sym1, *sym2 = __sym2;

        return sym_weight(sym1) < sym_weight(sym2);
}

static time_t                   last_refresh;
static long                     events;
static long                     userspace_events;
static const char               CONSOLE_CLEAR[] = "\033[H\033[2J";      /* ANSI: cursor home + clear screen */

static struct sym_entry         tmp[MAX_SYMS];

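/*
 * Print one refresh of the profile: snapshot the symbol table, sort the
 * snapshot by weight, print the top entries that pass count_filter, then
 * decay (or zero) the live counts so the display tracks recent activity.
 * Also shows the annotated -s symbol, if any, and exits on a keypress.
 */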
static void print_sym_table(void)
{
        int i, printed;
        int counter;
        float events_per_sec = events/delay_secs;
        float kevents_per_sec = (events-userspace_events)/delay_secs;

        memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count);
        qsort(tmp, sym_table_count, sizeof(tmp[0]), compare);

        write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR));

        printf(
"------------------------------------------------------------------------------\n");
        printf( " KernelTop:%8.0f irqs/sec  kernel:%3.1f%% [%s, ",
                events_per_sec,
                100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)),
                nmi ? "NMI" : "IRQ");

        if (nr_counters == 1)
                printf("%d ", event_count[0]);

        for (counter = 0; counter < nr_counters; counter++) {
                if (counter)
                        printf("/");

                printf("%s", event_name(counter));
        }

        printf( "], ");

        if (tid != -1)
                printf(" (tid: %d", tid);
        else
                printf(" (all");

        if (profile_cpu != -1)
                printf(", cpu: %d)\n", profile_cpu);
        else {
                if (tid != -1)
                        printf(")\n");
                else
                        printf(", %d CPUs)\n", nr_cpus);
        }

        printf("------------------------------------------------------------------------------\n\n");

        if (nr_counters == 1)
                printf("             events");
        else
                printf("  weight     events");

        printf("         RIP          kernel function\n"
               "  ______     ______   ________________   _______________\n\n");

        printed = 0;
        for (i = 0; i < sym_table_count; i++) {
                int count;

                if (nr_counters == 1) {
                        if (printed <= 18 &&
                            tmp[i].count[0] >= count_filter) {
                                printf("%19.2f - %016llx : %s\n",
                                       sym_weight(tmp + i), tmp[i].addr, tmp[i].sym);
                                printed++;
                        }
                } else {
                        if (printed <= 18 &&
                            tmp[i].count[0] >= count_filter) {
                                printf("%8.1f %10ld - %016llx : %s\n",
                                       sym_weight(tmp + i),
                                       tmp[i].count[0],
                                       tmp[i].addr, tmp[i].sym);
                                printed++;
                        }
                }
                /*
                 * Add decay to the counts:
                 */
                for (count = 0; count < nr_counters; count++)
                        sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8;
        }

        if (sym_filter_entry)
                show_details(sym_filter_entry);

        last_refresh = time(NULL);

        {
                struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };

                if (poll(&stdin_poll, 1, 0) == 1) {
                        printf("key pressed - exiting.\n");
                        exit(0);
                }
        }
}

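/*
 * Parse one line of /proc/kallsyms into *s: returns 0 if a usable text
 * symbol was stored, 1 if the line should be ignored, -1 on EOF.
 * Idle-loop symbols are tagged as "skip", and the address range of the
 * -s/--symbol filter symbol is recorded as filter_start/filter_end.
 */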
static int read_symbol(FILE *in, struct sym_entry *s)
{
        static int filter_match = 0;
        char *sym, stype;
        char str[500];
        int rc, pos;

        rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str);
        if (rc == EOF)
                return -1;

        assert(rc == 3);

        /* skip until end of line: */
        pos = strlen(str);
        do {
                rc = fgetc(in);
                if (rc == '\n' || rc == EOF || pos >= 499)
                        break;
                str[pos] = rc;
                pos++;
        } while (1);
        str[pos] = 0;

        sym = str;

        /* Filter out known duplicates and non-text symbols. */
        if (!strcmp(sym, "_text"))
                return 1;
        if (!min_ip && !strcmp(sym, "_stext"))
                return 1;
        if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext"))
                return 1;
        if (stype != 'T' && stype != 't')
                return 1;
        if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14))
                return 1;
        if (strstr(sym, "_text_start") || strstr(sym, "_text_end"))
                return 1;

        s->sym = malloc(strlen(str) + 1);
        assert(s->sym);

        strcpy((char *)s->sym, str);
        s->skip = 0;

        /* Tag events to be skipped. */
        if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym))
                s->skip = 1;
        if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym))
                s->skip = 1;

        if (filter_match == 1) {
                filter_end = s->addr;
                filter_match = -1;
                if (filter_end - filter_start > 10000) {
                        printf("hm, too large filter symbol <%s> - skipping.\n",
                               sym_filter);
                        printf("symbol filter start: %016lx\n", filter_start);
                        printf("                end: %016lx\n", filter_end);
                        filter_end = filter_start = 0;
                        sym_filter = NULL;
                        sleep(1);
                }
        }
        if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) {
                filter_match = 1;
                filter_start = s->addr;
        }

        return 0;
}

int compare_addr(const void *__sym1, const void *__sym2)
{
        const struct sym_entry *sym1 = __sym1, *sym2 = __sym2;

        return sym1->addr > sym2->addr;
}

static void sort_symbol_table(void)
{
        int i, dups;

        do {
                qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr);

                for (i = 0, dups = 0; i < sym_table_count - 1; i++) {
                        if (sym_table[i].addr == sym_table[i+1].addr) {
                                sym_table[i+1].addr = -1ll;
                                dups++;
                        }
                }
                sym_table_count -= dups;
        } while (dups);
}

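/*
 * Build sym_table[] from /proc/kallsyms: read every text symbol, sort the
 * table by address, drop duplicates, and append an "<end>" sentinel entry
 * so the binary search in record_ip() always has an upper bound.
 */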
static void parse_symbols(void)
{
        struct sym_entry *last;

        FILE *kallsyms = fopen("/proc/kallsyms", "r");

        if (!kallsyms) {
                printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n");
                exit(-1);
        }

        while (!feof(kallsyms)) {
                if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) {
                        sym_table_count++;
                        assert(sym_table_count <= MAX_SYMS);
                }
        }

        sort_symbol_table();
        min_ip = sym_table[0].addr;
        max_ip = sym_table[sym_table_count-1].addr;
        last = sym_table + sym_table_count++;

        last->addr = -1ll;
        last->sym = "<end>";

        if (filter_end) {
                int count;

                for (count = 0; count < sym_table_count; count++) {
                        if (!strcmp(sym_table[count].sym, sym_filter)) {
                                sym_filter_entry = &sym_table[count];
                                break;
                        }
                }
        }
        if (dump_symtab) {
                int i;

                for (i = 0; i < sym_table_count; i++)
                        fprintf(stderr, "%llx %s\n",
                                sym_table[i].addr, sym_table[i].sym);
        }
}

/*
 * Source lines
 */

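/*
 * Run "objdump -dS" on the vmlinux image, restricted to the address range
 * of the filtered symbol, and keep every line of the annotated disassembly
 * in the global 'lines' list; lines that begin with an address also get
 * their EIP parsed so samples can be matched back to them later.
 */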
static void parse_vmlinux(char *filename)
{
        FILE *file;
        char command[PATH_MAX*2];

        if (!filename)
                return;

        sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename);

        file = popen(command, "r");
        if (!file)
                return;

        while (!feof(file)) {
                struct source_line *src;
                size_t dummy = 0;
                char *c;

                src = malloc(sizeof(struct source_line));
                assert(src != NULL);
                memset(src, 0, sizeof(struct source_line));

                if (getline(&src->line, &dummy, file) < 0)
                        break;
                if (!src->line)
                        break;

                c = strchr(src->line, '\n');
                if (c)
                        *c = 0;

                lines = g_list_prepend(lines, src);

                if (strlen(src->line) > 8 && src->line[8] == ':')
                        src->EIP = strtoull(src->line, NULL, 16);
                if (strlen(src->line) > 16 && src->line[16] == ':')
                        src->EIP = strtoull(src->line, NULL, 16);
        }
        pclose(file);
        lines = g_list_reverse(lines);
}

static void record_precise_ip(uint64_t ip)
{
        struct source_line *line;
        GList *item;

        item = g_list_first(lines);
        while (item) {
                line = item->data;
                if (line->EIP == ip)
                        line->count++;
                if (line->EIP > ip)
                        break;
                item = g_list_next(item);
        }
}

static void lookup_sym_in_vmlinux(struct sym_entry *sym)
{
        struct source_line *line;
        GList *item;
        char pattern[PATH_MAX];

        sprintf(pattern, "<%s>:", sym->sym);

        item = g_list_first(lines);
        while (item) {
                line = item->data;
                if (strstr(line->line, pattern)) {
                        sym->source = item;
                        break;
                }
                item = g_list_next(item);
        }
}

void show_lines(GList *item_queue, int item_queue_count)
{
        int i;
        struct source_line *line;

        for (i = 0; i < item_queue_count; i++) {
                line = item_queue->data;
                printf("%8li\t%s\n", line->count, line->line);
                item_queue = g_list_next(item_queue);
        }
}

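/*
 * Annotated display of a single symbol: walk its objdump listing, keep a
 * small queue of up to TRACE_COUNT context lines, and flush the queue
 * whenever a line's hit count reaches count_filter. Stops at the next
 * symbol header or after 300 lines.
 */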
#define TRACE_COUNT 3

static void show_details(struct sym_entry *sym)
{
        struct source_line *line;
        GList *item;
        int displayed = 0;
        GList *item_queue = NULL;
        int item_queue_count = 0;

        if (!sym->source)
                lookup_sym_in_vmlinux(sym);
        if (!sym->source)
                return;

        printf("Showing details for %s\n", sym->sym);

        item = sym->source;
        while (item) {
                line = item->data;
                if (displayed && strstr(line->line, ">:"))
                        break;

                if (!item_queue_count)
                        item_queue = item;
                item_queue_count++;

                if (line->count >= count_filter) {
                        show_lines(item_queue, item_queue_count);
                        item_queue_count = 0;
                        item_queue = NULL;
                } else if (item_queue_count > TRACE_COUNT) {
                        item_queue = g_list_next(item_queue);
                        item_queue_count--;
                }

                line->count = 0;
                displayed++;
                if (displayed > 300)
                        break;
                item = g_list_next(item);
        }
}

/*
 * Binary search in the histogram table and record the hit:
 */
static void record_ip(uint64_t ip, int counter)
{
        int left_idx, middle_idx, right_idx, idx;
        unsigned long left, middle, right;

        record_precise_ip(ip);

        left_idx = 0;
        right_idx = sym_table_count-1;
        assert(ip <= max_ip && ip >= min_ip);

        while (left_idx + 1 < right_idx) {
                middle_idx = (left_idx + right_idx) / 2;

                left   = sym_table[  left_idx].addr;
                middle = sym_table[middle_idx].addr;
                right  = sym_table[ right_idx].addr;

                if (!(left <= middle && middle <= right)) {
                        printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right);
                        printf("%d %d %d\n", left_idx, middle_idx, right_idx);
                }
                assert(left <= middle && middle <= right);
                if (!(left <= ip && ip <= right)) {
                        printf(" left: %016lx\n", left);
                        printf("   ip: %016llx\n", ip);
                        printf("right: %016lx\n", right);
                }
                assert(left <= ip && ip <= right);

                /*
                 * [ left .... target .... middle .... right ]
                 *   => right := middle
                 */
                if (ip < middle) {
                        right_idx = middle_idx;
                        continue;
                }
                /*
                 * [ left .... middle ... target ... right ]
                 *   => left := middle
                 */
                left_idx = middle_idx;
        }

        idx = left_idx;

        if (!sym_table[idx].skip)
                sym_table[idx].count[counter]++;
        else
                events--;
}

static void process_event(uint64_t ip, int counter)
{
        events++;

        if (ip < min_ip || ip > max_ip) {
                userspace_events++;
                return;
        }

        record_ip(ip, counter);
}

static void process_options(int argc, char *argv[])
{
        int error = 0, counter;

        if (strstr(argv[0], "perfstat"))
                run_perfstat = 1;

        for (;;) {
                int option_index = 0;
                /** Options for getopt */
                static struct option long_options[] = {
                        {"count",       required_argument,      NULL, 'c'},
                        {"cpu",         required_argument,      NULL, 'C'},
                        {"delay",       required_argument,      NULL, 'd'},
                        {"dump_symtab", no_argument,            NULL, 'D'},
                        {"event",       required_argument,      NULL, 'e'},
                        {"filter",      required_argument,      NULL, 'f'},
                        {"group",       required_argument,      NULL, 'g'},
                        {"help",        no_argument,            NULL, 'h'},
                        {"nmi",         required_argument,      NULL, 'n'},
                        {"pid",         required_argument,      NULL, 'p'},
                        {"vmlinux",     required_argument,      NULL, 'x'},
                        {"symbol",      required_argument,      NULL, 's'},
                        {"stat",        no_argument,            NULL, 'S'},
                        {"zero",        no_argument,            NULL, 'z'},
                        {NULL,          0,                      NULL,  0 }
                };
                int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:p:s:Sx:z",
                                    long_options, &option_index);
                if (c == -1)
                        break;

                switch (c) {
                case 'a': system_wide = 1; break;
                case 'c': default_interval = atoi(optarg); break;
                case 'C':
                        /* CPU and PID are mutually exclusive */
                        if (tid != -1) {
                                printf("WARNING: CPU switch overriding PID\n");
                                sleep(1);
                                tid = -1;
                        }
                        profile_cpu = atoi(optarg); break;
                case 'd': delay_secs = atoi(optarg); break;
                case 'D': dump_symtab = 1; break;
                case 'e': error = parse_events(optarg); break;
                case 'f': count_filter = atoi(optarg); break;
                case 'g': group = atoi(optarg); break;
                case 'h': display_help(); break;
                case 'n': nmi = atoi(optarg); break;
                case 'p':
                        /* CPU and PID are mutually exclusive */
                        if (profile_cpu != -1) {
                                printf("WARNING: PID switch overriding CPU\n");
                                sleep(1);
                                profile_cpu = -1;
                        }
                        tid = atoi(optarg); break;
                case 's': sym_filter = strdup(optarg); break;
                case 'S': run_perfstat = 1; break;
                case 'x': vmlinux = strdup(optarg); break;
                case 'z': zero = 1; break;
                default: error = 1; break;
                }
        }
        if (error)
                display_help();

        if (!nr_counters) {
                if (run_perfstat)
                        nr_counters = 8;
                else {
                        nr_counters = 1;
                        event_id[0] = 0;
                }
        }

        for (counter = 0; counter < nr_counters; counter++) {
                if (event_count[counter])
                        continue;

                event_count[counter] = default_interval;
        }
}

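/*
 * Profiling mode (the default): open one sampling counter per monitored
 * CPU and per event, read the sampled RIPs from the counter fds in a loop,
 * attribute them to kernel symbols, and refresh the display every
 * delay_secs seconds. When invoked as perfstat (or with -S) it hands off
 * to do_perfstat() instead.
 */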
int main(int argc, char *argv[])
{
        struct pollfd event_array[MAX_NR_CPUS][MAX_COUNTERS];
        struct perf_counter_hw_event hw_event;
        int i, counter, group_fd;
        unsigned int cpu;
        uint64_t ip;
        ssize_t res;
        int ret;

        process_options(argc, argv);

        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        assert(nr_cpus <= MAX_NR_CPUS);
        assert(nr_cpus >= 0);

        if (run_perfstat)
                return do_perfstat(argc, argv);

        if (tid != -1 || profile_cpu != -1)
                nr_cpus = 1;

        for (i = 0; i < nr_cpus; i++) {
                group_fd = -1;
                for (counter = 0; counter < nr_counters; counter++) {

                        cpu = profile_cpu;
                        if (tid == -1 && profile_cpu == -1)
                                cpu = i;

                        memset(&hw_event, 0, sizeof(hw_event));
                        hw_event.config         = event_id[counter];
                        hw_event.irq_period     = event_count[counter];
                        hw_event.record_type    = PERF_RECORD_IRQ;
                        hw_event.nmi            = nmi;

                        printf("FOO: %d %llx %d\n", counter, event_id[counter], event_count[counter]);
                        fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0);
                        if (fd[i][counter] < 0) {
                                printf("kerneltop error: syscall returned with %d (%s)\n",
                                       fd[i][counter], strerror(-fd[i][counter]));
                                if (fd[i][counter] == -1)
                                        printf("Are you root?\n");
                                exit(-1);
                        }
                        assert(fd[i][counter] >= 0);
                        fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

                        /*
                         * First counter acts as the group leader:
                         */
                        if (group && group_fd == -1)
                                group_fd = fd[i][counter];

                        event_array[i][counter].fd = fd[i][counter];
                        event_array[i][counter].events = POLLIN;
                }
        }

        parse_symbols();
        if (vmlinux && sym_filter_entry)
                parse_vmlinux(vmlinux);

        printf("KernelTop refresh period: %d seconds\n", delay_secs);
        last_refresh = time(NULL);

        while (1) {
                int hits = events;

                for (i = 0; i < nr_cpus; i++) {
                        for (counter = 0; counter < nr_counters; counter++) {
                                res = read(fd[i][counter], (char *) &ip, sizeof(ip));
                                if (res > 0) {
                                        assert(res == sizeof(ip));

                                        process_event(ip, counter);
                                }
                        }
                }

                if (time(NULL) >= last_refresh + delay_secs) {
                        print_sym_table();
                        events = userspace_events = 0;
                }

                if (hits == events)
                        ret = poll(event_array[0], nr_cpus, 1000);
                hits = events;
        }

        return 0;
}