/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/parse-options.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"

static long page_size;
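
/*
 * Symbol filter used while loading the vmlinux DSO: marks each symbol as
 * visited in its per-symbol private area (sized via symbol_conf.priv_size
 * in cmd_test() below).
 */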
static int vmlinux_matches_kallsyms_filter(struct map *map __used,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}
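
/*
 * Load the kernel symbol table twice, once from the vmlinux image and once
 * from /proc/kallsyms, then verify that both agree on symbol names, start
 * addresses and (within one page) end addresses.
 */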
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("machine__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("map__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol: if we find it we'll
	 * have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
	 * routines to fix up the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;

	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;

				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
				/*
				 * Several symbols can share a start address,
				 * so if the names differ, walk the rbtree
				 * backwards from the first match and then
				 * forwards, looking for an alias at the same
				 * address with a matching name.
				 */
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#Lx: diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the vmlinux map will have the path of the vmlinux file
		 * being used, so use the short name, less descriptive but the
		 * same ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%Lx-%Lx %Lx",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
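
/*
 * Read the numeric id of a syscalls tracepoint from debugfs, e.g.
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id, for use
 * as attr.config of a PERF_TYPE_TRACEPOINT event.
 */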
static int trace_event__id(const char *event_name)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
		     event_name) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);

		if (n > 0) {
			id[n] = '\0'; /* the id file has no NUL terminator */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}
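
/*
 * Open a counter for the sys_enter_open tracepoint on the current thread,
 * do a known number of open(2) calls and check that the counter saw
 * exactly that many.
 */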
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>
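
/*
 * Same as test__open_syscall_event(), but bind the thread to each CPU in
 * turn with sched_setaffinity() and make a per-cpu distinct number of
 * open(2) calls, so each per-cpu counter can be verified independently.
 */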
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t *cpu_set;
	size_t cpu_set_size;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	cpu_set = CPU_ALLOC(cpus->nr);
	if (cpu_set == NULL)
		goto out_thread_map_delete;

	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
	CPU_ZERO_S(cpu_set_size, cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_cpu_free;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;

		CPU_SET(cpu, cpu_set);
		sched_setaffinity(0, cpu_set_size, cpu_set);
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpu, cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			goto out_close_fd;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
				 expected, cpu, evsel->counts->cpu[cpu].val);
			goto out_close_fd;
		}
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_cpu_free:
	CPU_FREE(cpu_set);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
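
/*
 * Table of all tests; __cmd_test() runs them in order until it hits the
 * NULL sentinel.
 */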
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.func = NULL,
	},
};
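
/*
 * Run every test in sequence, printing "Ok" or "FAILED!" for each; the
 * "--- start ---"/"---- end ----" markers around a test's debug output
 * only appear with -v.
 */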
static int __cmd_test(void)
{
	int i = 0;

	page_size = sysconf(_SC_PAGE_SIZE);

	while (tests[i].func) {
		int err;

		pr_info("%2d: %s:", i + 1, tests[i].desc);
		pr_debug("\n--- start ---\n");
		err = tests[i].func();
		pr_debug("---- end ----\n%s:", tests[i].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
		++i;
	}

	return 0;
}

static const char * const test_usage[] = {
	"perf test [<options>]",
	NULL,
};

static const struct option test_options[] = {
	OPT_INTEGER('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};
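
/*
 * Entry point for 'perf test'. The symbol_conf setup matters:
 * priv_size reserves the per-symbol private area written by
 * vmlinux_matches_kallsyms_filter(), sort_by_name allows looking up
 * the "_stext" ref reloc symbol by name, and try_vmlinux_path makes
 * symbol__init() search the usual vmlinux locations.
 */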
int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc)
		usage_with_options(test_usage, test_options);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	setup_pager();

	return __cmd_test();
}