/* builtin-test.c */
  1. /*
  2. * builtin-test.c
  3. *
  4. * Builtin regression testing command: ever growing number of sanity tests
  5. */
  6. #include "builtin.h"
  7. #include "util/cache.h"
  8. #include "util/debug.h"
  9. #include "util/debugfs.h"
  10. #include "util/evlist.h"
  11. #include "util/parse-options.h"
  12. #include "util/parse-events.h"
  13. #include "util/symbol.h"
  14. #include "util/thread_map.h"
  15. #include "util/pmu.h"
  16. #include "../../include/linux/hw_breakpoint.h"
  17. #include <sys/mman.h>
  18. static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
  19. struct symbol *sym)
  20. {
  21. bool *visited = symbol__priv(sym);
  22. *visited = true;
  23. return 0;
  24. }
/*
 * Sanity test: load the kernel symbol table twice -- once from
 * /proc/kallsyms and once from a vmlinux image found in the vmlinux
 * search path -- then cross check that both agree on symbols (names,
 * start/end addresses) and, in verbose mode, on the kernel/module maps.
 *
 * Returns 0 on success, -1 on any setup failure or mismatch.
 */
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	/*
	 * kallsyms carries no symbol sizes, so end addresses are derived and
	 * may be off; skews smaller than a page are tolerated below.
	 */
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation * symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);
	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}
	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}
	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check its names and
	 * end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		/* Zero-sized symbols can't meaningfully be compared; skip. */
		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;

				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				/*
				 * Same address, different name: kallsyms may
				 * have aliases at this address, so scan the
				 * neighbours (backwards first, then forward
				 * from the original hit) for a same-start
				 * symbol to retry the name/end comparison on.
				 */
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		/* Every matching path above did 'continue'; reaching here means a mismatch. */
		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;	/* ->priv marks kallsyms maps already matched */
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	pr_info("Maps only in kallsyms:\n");

	/* Anything still unmarked was never matched from the vmlinux side. */
	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}
  209. #include "util/cpumap.h"
  210. #include "util/evsel.h"
  211. #include <sys/types.h>
  212. static int trace_event__id(const char *evname)
  213. {
  214. char *filename;
  215. int err = -1, fd;
  216. if (asprintf(&filename,
  217. "%s/syscalls/%s/id",
  218. tracing_events_path, evname) < 0)
  219. return -1;
  220. fd = open(filename, O_RDONLY);
  221. if (fd >= 0) {
  222. char id[16];
  223. if (read(fd, id, sizeof(id)) > 0)
  224. err = atoi(id);
  225. close(fd);
  226. }
  227. free(filename);
  228. return err;
  229. }
/*
 * Sanity test for the sys_enter_open tracepoint: attach a counter to the
 * current thread, perform a fixed number of open(2) calls, and verify the
 * counter intercepted exactly that many.
 *
 * Returns 0 on success, -1 on failure.
 */
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	/* Monitor just the current thread. */
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	/* Generate exactly nr_open_calls open(2) syscalls. */
	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
	/* goto-based cleanup: labels unwind in reverse order of acquisition. */
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
  283. #include <sched.h>
  284. static int test__open_syscall_event_on_all_cpus(void)
  285. {
  286. int err = -1, fd, cpu;
  287. struct thread_map *threads;
  288. struct cpu_map *cpus;
  289. struct perf_evsel *evsel;
  290. struct perf_event_attr attr;
  291. unsigned int nr_open_calls = 111, i;
  292. cpu_set_t cpu_set;
  293. int id = trace_event__id("sys_enter_open");
  294. if (id < 0) {
  295. pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
  296. return -1;
  297. }
  298. threads = thread_map__new(-1, getpid(), UINT_MAX);
  299. if (threads == NULL) {
  300. pr_debug("thread_map__new\n");
  301. return -1;
  302. }
  303. cpus = cpu_map__new(NULL);
  304. if (cpus == NULL) {
  305. pr_debug("cpu_map__new\n");
  306. goto out_thread_map_delete;
  307. }
  308. CPU_ZERO(&cpu_set);
  309. memset(&attr, 0, sizeof(attr));
  310. attr.type = PERF_TYPE_TRACEPOINT;
  311. attr.config = id;
  312. evsel = perf_evsel__new(&attr, 0);
  313. if (evsel == NULL) {
  314. pr_debug("perf_evsel__new\n");
  315. goto out_thread_map_delete;
  316. }
  317. if (perf_evsel__open(evsel, cpus, threads) < 0) {
  318. pr_debug("failed to open counter: %s, "
  319. "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  320. strerror(errno));
  321. goto out_evsel_delete;
  322. }
  323. for (cpu = 0; cpu < cpus->nr; ++cpu) {
  324. unsigned int ncalls = nr_open_calls + cpu;
  325. /*
  326. * XXX eventually lift this restriction in a way that
  327. * keeps perf building on older glibc installations
  328. * without CPU_ALLOC. 1024 cpus in 2010 still seems
  329. * a reasonable upper limit tho :-)
  330. */
  331. if (cpus->map[cpu] >= CPU_SETSIZE) {
  332. pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
  333. continue;
  334. }
  335. CPU_SET(cpus->map[cpu], &cpu_set);
  336. if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  337. pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  338. cpus->map[cpu],
  339. strerror(errno));
  340. goto out_close_fd;
  341. }
  342. for (i = 0; i < ncalls; ++i) {
  343. fd = open("/etc/passwd", O_RDONLY);
  344. close(fd);
  345. }
  346. CPU_CLR(cpus->map[cpu], &cpu_set);
  347. }
  348. /*
  349. * Here we need to explicitely preallocate the counts, as if
  350. * we use the auto allocation it will allocate just for 1 cpu,
  351. * as we start by cpu 0.
  352. */
  353. if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
  354. pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
  355. goto out_close_fd;
  356. }
  357. err = 0;
  358. for (cpu = 0; cpu < cpus->nr; ++cpu) {
  359. unsigned int expected;
  360. if (cpus->map[cpu] >= CPU_SETSIZE)
  361. continue;
  362. if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
  363. pr_debug("perf_evsel__read_on_cpu\n");
  364. err = -1;
  365. break;
  366. }
  367. expected = nr_open_calls + cpu;
  368. if (evsel->counts->cpu[cpu].val != expected) {
  369. pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
  370. expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
  371. err = -1;
  372. }
  373. }
  374. out_close_fd:
  375. perf_evsel__close_fd(evsel, 1, threads->nr);
  376. out_evsel_delete:
  377. perf_evsel__delete(evsel);
  378. out_thread_map_delete:
  379. thread_map__delete(threads);
  380. return err;
  381. }
  382. /*
  383. * This test will generate random numbers of calls to some getpid syscalls,
  384. * then establish an mmap for a group of events that are created to monitor
  385. * the syscalls.
  386. *
  387. * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
  388. * sample.id field to map back to its respective perf_evsel instance.
  389. *
  390. * Then it checks if the number of syscalls reported as perf events by
  391. * the kernel corresponds to the number of syscalls made.
  392. */
  393. static int test__basic_mmap(void)
  394. {
  395. int err = -1;
  396. union perf_event *event;
  397. struct thread_map *threads;
  398. struct cpu_map *cpus;
  399. struct perf_evlist *evlist;
  400. struct perf_event_attr attr = {
  401. .type = PERF_TYPE_TRACEPOINT,
  402. .read_format = PERF_FORMAT_ID,
  403. .sample_type = PERF_SAMPLE_ID,
  404. .watermark = 0,
  405. };
  406. cpu_set_t cpu_set;
  407. const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
  408. "getpgid", };
  409. pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
  410. (void*)getpgid };
  411. #define nsyscalls ARRAY_SIZE(syscall_names)
  412. int ids[nsyscalls];
  413. unsigned int nr_events[nsyscalls],
  414. expected_nr_events[nsyscalls], i, j;
  415. struct perf_evsel *evsels[nsyscalls], *evsel;
  416. for (i = 0; i < nsyscalls; ++i) {
  417. char name[64];
  418. snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
  419. ids[i] = trace_event__id(name);
  420. if (ids[i] < 0) {
  421. pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
  422. return -1;
  423. }
  424. nr_events[i] = 0;
  425. expected_nr_events[i] = random() % 257;
  426. }
  427. threads = thread_map__new(-1, getpid(), UINT_MAX);
  428. if (threads == NULL) {
  429. pr_debug("thread_map__new\n");
  430. return -1;
  431. }
  432. cpus = cpu_map__new(NULL);
  433. if (cpus == NULL) {
  434. pr_debug("cpu_map__new\n");
  435. goto out_free_threads;
  436. }
  437. CPU_ZERO(&cpu_set);
  438. CPU_SET(cpus->map[0], &cpu_set);
  439. sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
  440. if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  441. pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  442. cpus->map[0], strerror(errno));
  443. goto out_free_cpus;
  444. }
  445. evlist = perf_evlist__new(cpus, threads);
  446. if (evlist == NULL) {
  447. pr_debug("perf_evlist__new\n");
  448. goto out_free_cpus;
  449. }
  450. /* anonymous union fields, can't be initialized above */
  451. attr.wakeup_events = 1;
  452. attr.sample_period = 1;
  453. for (i = 0; i < nsyscalls; ++i) {
  454. attr.config = ids[i];
  455. evsels[i] = perf_evsel__new(&attr, i);
  456. if (evsels[i] == NULL) {
  457. pr_debug("perf_evsel__new\n");
  458. goto out_free_evlist;
  459. }
  460. perf_evlist__add(evlist, evsels[i]);
  461. if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
  462. pr_debug("failed to open counter: %s, "
  463. "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  464. strerror(errno));
  465. goto out_close_fd;
  466. }
  467. }
  468. if (perf_evlist__mmap(evlist, 128, true) < 0) {
  469. pr_debug("failed to mmap events: %d (%s)\n", errno,
  470. strerror(errno));
  471. goto out_close_fd;
  472. }
  473. for (i = 0; i < nsyscalls; ++i)
  474. for (j = 0; j < expected_nr_events[i]; ++j) {
  475. int foo = syscalls[i]();
  476. ++foo;
  477. }
  478. while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
  479. struct perf_sample sample;
  480. if (event->header.type != PERF_RECORD_SAMPLE) {
  481. pr_debug("unexpected %s event\n",
  482. perf_event__name(event->header.type));
  483. goto out_munmap;
  484. }
  485. err = perf_evlist__parse_sample(evlist, event, &sample, false);
  486. if (err) {
  487. pr_err("Can't parse sample, err = %d\n", err);
  488. goto out_munmap;
  489. }
  490. evsel = perf_evlist__id2evsel(evlist, sample.id);
  491. if (evsel == NULL) {
  492. pr_debug("event with id %" PRIu64
  493. " doesn't map to an evsel\n", sample.id);
  494. goto out_munmap;
  495. }
  496. nr_events[evsel->idx]++;
  497. }
  498. list_for_each_entry(evsel, &evlist->entries, node) {
  499. if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
  500. pr_debug("expected %d %s events, got %d\n",
  501. expected_nr_events[evsel->idx],
  502. perf_evsel__name(evsel), nr_events[evsel->idx]);
  503. goto out_munmap;
  504. }
  505. }
  506. err = 0;
  507. out_munmap:
  508. perf_evlist__munmap(evlist);
  509. out_close_fd:
  510. for (i = 0; i < nsyscalls; ++i)
  511. perf_evsel__close_fd(evsels[i], 1, threads->nr);
  512. out_free_evlist:
  513. perf_evlist__delete(evlist);
  514. out_free_cpus:
  515. cpu_map__delete(cpus);
  516. out_free_threads:
  517. thread_map__delete(threads);
  518. return err;
  519. #undef nsyscalls
  520. }
  521. static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
  522. size_t *sizep)
  523. {
  524. cpu_set_t *mask;
  525. size_t size;
  526. int i, cpu = -1, nrcpus = 1024;
  527. realloc:
  528. mask = CPU_ALLOC(nrcpus);
  529. size = CPU_ALLOC_SIZE(nrcpus);
  530. CPU_ZERO_S(size, mask);
  531. if (sched_getaffinity(pid, size, mask) == -1) {
  532. CPU_FREE(mask);
  533. if (errno == EINVAL && nrcpus < (1024 << 8)) {
  534. nrcpus = nrcpus << 2;
  535. goto realloc;
  536. }
  537. perror("sched_getaffinity");
  538. return -1;
  539. }
  540. for (i = 0; i < nrcpus; i++) {
  541. if (CPU_ISSET_S(i, size, mask)) {
  542. if (cpu == -1) {
  543. cpu = i;
  544. *maskp = mask;
  545. *sizep = size;
  546. } else
  547. CPU_CLR_S(i, size, mask);
  548. }
  549. }
  550. if (cpu == -1)
  551. CPU_FREE(mask);
  552. return cpu;
  553. }
/*
 * Fork a "sleep 1" workload, record it with the default "cycles" event
 * pinned to the workload's first possible cpu, and validate the resulting
 * PERF_RECORD_* stream: monotonic timestamps, expected cpu/pid/tid on
 * every event, and the presence of the COMM and MMAP (cmd, libc, ld,
 * [vdso]) records a minimal run must produce.
 *
 * Returns 0 when nothing failed, -1 when any setup error or check failed.
 */
static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay   = true,
		.freq	    = 10,
		.mmap_pages = 256,
	};
	cpu_set_t *cpu_mask = NULL;
	size_t cpu_mask_size = 0;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	/* err tracks fatal setup failures; errs counts soft check failures. */
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
					    &cpu_mask_size);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_free_cpu_mask;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 *
	 * NOTE(review): this and the mmap failure path below jump to
	 * out_delete_evlist, skipping CPU_FREE(cpu_mask) -- cpu_mask is
	 * leaked on those paths; consider goto out_free_cpu_mask instead.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample, false);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				/* Timestamps must be monotonically non-decreasing. */
				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				/* Workload was pinned to 'cpu' above; every event must be there. */
				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				/* Single-threaded workload: pid and tid must agree. */
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				    event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					/* Track which of the expected DSO maps showed up. */
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;
				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		/* '&& false' deliberately disables the poll() path, see above. */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		/* Bail out if PERF_RECORD_EXIT never arrives (workload sleeps 1s). */
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_free_cpu_mask:
	CPU_FREE(cpu_mask);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
  800. #if defined(__x86_64__) || defined(__i386__)
  801. #define barrier() asm volatile("" ::: "memory")
  802. static u64 rdpmc(unsigned int counter)
  803. {
  804. unsigned int low, high;
  805. asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
  806. return low | ((u64)high) << 32;
  807. }
  808. static u64 rdtsc(void)
  809. {
  810. unsigned int low, high;
  811. asm volatile("rdtsc" : "=a" (low), "=d" (high));
  812. return low | ((u64)high) << 32;
  813. }
/*
 * Userspace self-monitoring read of a counter through the mmap'ed
 * perf_event_mmap_page at @addr: a seqlock-style retry loop around
 * rdpmc(), followed by time scaling when the event was not scheduled
 * for the entire enabled period.
 */
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		/* Seqlock read side: retry if pc->lock changes under us. */
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			/* Event was multiplexed: snapshot TSC + scaling params. */
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		/* pc->index is the rdpmc counter number + 1; 0 means "not active". */
		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	if (enabled != running) {
		u64 quot, rem;

		/* delta = time_offset + (cyc * time_mult) >> time_shift, split to avoid overflow */
		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		/* Scale count by enabled/running, again split to limit overflow. */
		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}
/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)
{
	/* Non-zero child exit status is what test__rdpmc() checks for. */
	exit(-1);
}
  860. static int __test__rdpmc(void)
  861. {
  862. long page_size = sysconf(_SC_PAGE_SIZE);
  863. volatile int tmp = 0;
  864. u64 i, loops = 1000;
  865. int n;
  866. int fd;
  867. void *addr;
  868. struct perf_event_attr attr = {
  869. .type = PERF_TYPE_HARDWARE,
  870. .config = PERF_COUNT_HW_INSTRUCTIONS,
  871. .exclude_kernel = 1,
  872. };
  873. u64 delta_sum = 0;
  874. struct sigaction sa;
  875. sigfillset(&sa.sa_mask);
  876. sa.sa_sigaction = segfault_handler;
  877. sigaction(SIGSEGV, &sa, NULL);
  878. fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
  879. if (fd < 0) {
  880. pr_err("Error: sys_perf_event_open() syscall returned "
  881. "with %d (%s)\n", fd, strerror(errno));
  882. return -1;
  883. }
  884. addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
  885. if (addr == (void *)(-1)) {
  886. pr_err("Error: mmap() syscall returned with (%s)\n",
  887. strerror(errno));
  888. goto out_close;
  889. }
  890. for (n = 0; n < 6; n++) {
  891. u64 stamp, now, delta;
  892. stamp = mmap_read_self(addr);
  893. for (i = 0; i < loops; i++)
  894. tmp++;
  895. now = mmap_read_self(addr);
  896. loops *= 10;
  897. delta = now - stamp;
  898. pr_debug("%14d: %14Lu\n", n, (long long)delta);
  899. delta_sum += delta;
  900. }
  901. munmap(addr, page_size);
  902. pr_debug(" ");
  903. out_close:
  904. close(fd);
  905. if (!delta_sum)
  906. return -1;
  907. return 0;
  908. }
  909. static int test__rdpmc(void)
  910. {
  911. int status = 0;
  912. int wret = 0;
  913. int ret;
  914. int pid;
  915. pid = fork();
  916. if (pid < 0)
  917. return -1;
  918. if (!pid) {
  919. ret = __test__rdpmc();
  920. exit(ret);
  921. }
  922. wret = waitpid(pid, &status, 0);
  923. if (wret < 0 || status)
  924. return -1;
  925. return 0;
  926. }
  927. #endif
/* Thin adapter so the PMU format-parsing test fits the tests[] table. */
static int test__perf_pmu(void)
{
	return perf_pmu__test();
}
/*
 * Generate every valid hw-cache event name, feed each to
 * parse_events(), then regenerate the names in the same order and
 * verify the evsels created report the same name back through
 * perf_evsel__name().
 *
 * Returns 0 on success, -ENOMEM if the evlist cannot be allocated,
 * otherwise the last error seen (a parse_events() error or -1 for a
 * name mismatch).
 */
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	/* First pass: parse every valid cache event name onto the evlist. */
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				/* Record the failure but keep going. */
				err = parse_events(evlist, name, 0);
				if (err)
					ret = err;
			}
		}
	}

	/*
	 * Second pass: walk the names in the same order while advancing
	 * through the evlist.  idx counts expected evsel indices;
	 * presumably the evsel->idx != idx check skips names that did
	 * not produce a matching evsel above — TODO confirm against
	 * parse_events() idx assignment.
	 */
	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}
  978. static int __perf_evsel__name_array_test(const char *names[], int nr_names)
  979. {
  980. int i, err;
  981. struct perf_evsel *evsel;
  982. struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
  983. if (evlist == NULL)
  984. return -ENOMEM;
  985. for (i = 0; i < nr_names; ++i) {
  986. err = parse_events(evlist, names[i], 0);
  987. if (err) {
  988. pr_debug("failed to parse event '%s', err %d\n",
  989. names[i], err);
  990. goto out_delete_evlist;
  991. }
  992. }
  993. err = 0;
  994. list_for_each_entry(evsel, &evlist->entries, node) {
  995. if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
  996. --err;
  997. pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
  998. }
  999. }
  1000. out_delete_evlist:
  1001. perf_evlist__delete(evlist);
  1002. return err;
  1003. }
/* Expand a name array together with its compile-time element count. */
#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))

/*
 * Round-trip the static hw/sw/cache event name tables through the
 * event parser.  All three checks always run; returns the last error
 * seen, or 0 when everything round-tripped.
 */
static int perf_evsel__roundtrip_name_test(void)
{
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	if (err)
		ret = err;

	err = perf_evsel__roundtrip_cache_name_test();
	if (err)
		ret = err;

	return ret;
}
/*
 * Table of built-in tests, run in declaration order by __cmd_test()
 * and listed by perf_test__list().  Terminated by a NULL .func entry.
 */
static struct test {
	const char *desc;	/* human-readable name, also matched by 'perf test <fragment>' */
	int (*func)(void);	/* returns 0 on success, non-zero on failure */
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	/* rdpmc is an x86-only instruction, so the test is gated. */
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = perf_evsel__roundtrip_name_test,
	},
	{
		/* sentinel: NULL .func terminates iteration */
		.func = NULL,
	},
};
  1070. static bool perf_test__matches(int curr, int argc, const char *argv[])
  1071. {
  1072. int i;
  1073. if (argc == 0)
  1074. return true;
  1075. for (i = 0; i < argc; ++i) {
  1076. char *end;
  1077. long nr = strtoul(argv[i], &end, 10);
  1078. if (*end == '\0') {
  1079. if (nr == curr + 1)
  1080. return true;
  1081. continue;
  1082. }
  1083. if (strstr(tests[curr].desc, argv[i]))
  1084. return true;
  1085. }
  1086. return false;
  1087. }
  1088. static int __cmd_test(int argc, const char *argv[])
  1089. {
  1090. int i = 0;
  1091. while (tests[i].func) {
  1092. int curr = i++, err;
  1093. if (!perf_test__matches(curr, argc, argv))
  1094. continue;
  1095. pr_info("%2d: %s:", i, tests[curr].desc);
  1096. pr_debug("\n--- start ---\n");
  1097. err = tests[curr].func();
  1098. pr_debug("---- end ----\n%s:", tests[curr].desc);
  1099. pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
  1100. }
  1101. return 0;
  1102. }
  1103. static int perf_test__list(int argc, const char **argv)
  1104. {
  1105. int i = 0;
  1106. while (tests[i].func) {
  1107. int curr = i++;
  1108. if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
  1109. continue;
  1110. pr_info("%2d: %s\n", i, tests[curr].desc);
  1111. }
  1112. return 0;
  1113. }
  1114. int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
  1115. {
  1116. const char * const test_usage[] = {
  1117. "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
  1118. NULL,
  1119. };
  1120. const struct option test_options[] = {
  1121. OPT_INCR('v', "verbose", &verbose,
  1122. "be more verbose (show symbol address, etc)"),
  1123. OPT_END()
  1124. };
  1125. argc = parse_options(argc, argv, test_options, test_usage, 0);
  1126. if (argc >= 1 && !strcmp(argv[0], "list"))
  1127. return perf_test__list(argc, argv);
  1128. symbol_conf.priv_size = sizeof(int);
  1129. symbol_conf.sort_by_name = true;
  1130. symbol_conf.try_vmlinux_path = true;
  1131. if (symbol__init() < 0)
  1132. return -1;
  1133. return __cmd_test(argc, argv);
  1134. }