builtin-test.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588
  1. /*
  2. * builtin-test.c
  3. *
  4. * Builtin regression testing command: ever growing number of sanity tests
  5. */
  6. #include "builtin.h"
  7. #include "util/cache.h"
  8. #include "util/debug.h"
  9. #include "util/debugfs.h"
  10. #include "util/evlist.h"
  11. #include "util/parse-options.h"
  12. #include "util/parse-events.h"
  13. #include "util/symbol.h"
  14. #include "util/thread_map.h"
  15. #include "../../include/linux/hw_breakpoint.h"
  16. #include <sys/mman.h>
  17. static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
  18. {
  19. bool *visited = symbol__priv(sym);
  20. *visited = true;
  21. return 0;
  22. }
/*
 * Regression test: the kernel symbols and maps obtained from an ELF vmlinux
 * image must agree (modulo known limitations, see the page_size skew note
 * below) with the ones parsed from /proc/kallsyms.
 *
 * Returns 0 when everything matches, -1 on any mismatch or setup failure.
 */
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation * symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);
	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check its names and
	 * end addresses too.
	 *
	 * Matching symbols "continue" past the trailing err = -1, so err only
	 * goes back to -1 when some symbol fails every check below.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		/* zero-sized symbols can't meaningfully be compared, skip */
		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
				/*
				 * Aliased symbols: walk neighbours with the
				 * same start address, first backwards then
				 * forwards from the original candidate,
				 * looking for one with a matching name.
				 */
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	/* the map-level comparisons below are informational, verbose only */
	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;	/* mark as seen for the last loop */
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}
  207. #include "util/cpumap.h"
  208. #include "util/evsel.h"
  209. #include <sys/types.h>
  210. static int trace_event__id(const char *evname)
  211. {
  212. char *filename;
  213. int err = -1, fd;
  214. if (asprintf(&filename,
  215. "%s/syscalls/%s/id",
  216. tracing_events_path, evname) < 0)
  217. return -1;
  218. fd = open(filename, O_RDONLY);
  219. if (fd >= 0) {
  220. char id[16];
  221. if (read(fd, id, sizeof(id)) > 0)
  222. err = atoi(id);
  223. close(fd);
  224. }
  225. free(filename);
  226. return err;
  227. }
/*
 * Single-counter sanity test: open a syscalls:sys_enter_open tracepoint
 * counter on this thread only, perform a known number of open(2) calls,
 * then read the counter back and verify the kernel saw exactly that many.
 *
 * Returns 0 on success, -1 on failure (missing debugfs counts as failure).
 */
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	/* a map containing just our own pid: monitor only this task */
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	/* generate exactly nr_open_calls events for the counter to catch */
	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
	/* unwind in reverse order of acquisition */
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
  281. #include <sched.h>
  282. static int test__open_syscall_event_on_all_cpus(void)
  283. {
  284. int err = -1, fd, cpu;
  285. struct thread_map *threads;
  286. struct cpu_map *cpus;
  287. struct perf_evsel *evsel;
  288. struct perf_event_attr attr;
  289. unsigned int nr_open_calls = 111, i;
  290. cpu_set_t cpu_set;
  291. int id = trace_event__id("sys_enter_open");
  292. if (id < 0) {
  293. pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
  294. return -1;
  295. }
  296. threads = thread_map__new(-1, getpid(), UINT_MAX);
  297. if (threads == NULL) {
  298. pr_debug("thread_map__new\n");
  299. return -1;
  300. }
  301. cpus = cpu_map__new(NULL);
  302. if (cpus == NULL) {
  303. pr_debug("cpu_map__new\n");
  304. goto out_thread_map_delete;
  305. }
  306. CPU_ZERO(&cpu_set);
  307. memset(&attr, 0, sizeof(attr));
  308. attr.type = PERF_TYPE_TRACEPOINT;
  309. attr.config = id;
  310. evsel = perf_evsel__new(&attr, 0);
  311. if (evsel == NULL) {
  312. pr_debug("perf_evsel__new\n");
  313. goto out_thread_map_delete;
  314. }
  315. if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
  316. pr_debug("failed to open counter: %s, "
  317. "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  318. strerror(errno));
  319. goto out_evsel_delete;
  320. }
  321. for (cpu = 0; cpu < cpus->nr; ++cpu) {
  322. unsigned int ncalls = nr_open_calls + cpu;
  323. /*
  324. * XXX eventually lift this restriction in a way that
  325. * keeps perf building on older glibc installations
  326. * without CPU_ALLOC. 1024 cpus in 2010 still seems
  327. * a reasonable upper limit tho :-)
  328. */
  329. if (cpus->map[cpu] >= CPU_SETSIZE) {
  330. pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
  331. continue;
  332. }
  333. CPU_SET(cpus->map[cpu], &cpu_set);
  334. if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  335. pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  336. cpus->map[cpu],
  337. strerror(errno));
  338. goto out_close_fd;
  339. }
  340. for (i = 0; i < ncalls; ++i) {
  341. fd = open("/etc/passwd", O_RDONLY);
  342. close(fd);
  343. }
  344. CPU_CLR(cpus->map[cpu], &cpu_set);
  345. }
  346. /*
  347. * Here we need to explicitely preallocate the counts, as if
  348. * we use the auto allocation it will allocate just for 1 cpu,
  349. * as we start by cpu 0.
  350. */
  351. if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
  352. pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
  353. goto out_close_fd;
  354. }
  355. err = 0;
  356. for (cpu = 0; cpu < cpus->nr; ++cpu) {
  357. unsigned int expected;
  358. if (cpus->map[cpu] >= CPU_SETSIZE)
  359. continue;
  360. if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
  361. pr_debug("perf_evsel__read_on_cpu\n");
  362. err = -1;
  363. break;
  364. }
  365. expected = nr_open_calls + cpu;
  366. if (evsel->counts->cpu[cpu].val != expected) {
  367. pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
  368. expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
  369. err = -1;
  370. }
  371. }
  372. out_close_fd:
  373. perf_evsel__close_fd(evsel, 1, threads->nr);
  374. out_evsel_delete:
  375. perf_evsel__delete(evsel);
  376. out_thread_map_delete:
  377. thread_map__delete(threads);
  378. return err;
  379. }
  380. /*
  381. * This test will generate random numbers of calls to some getpid syscalls,
  382. * then establish an mmap for a group of events that are created to monitor
  383. * the syscalls.
  384. *
  385. * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
  386. * sample.id field to map back to its respective perf_evsel instance.
  387. *
  388. * Then it checks if the number of syscalls reported as perf events by
  389. * the kernel corresponds to the number of syscalls made.
  390. */
  391. static int test__basic_mmap(void)
  392. {
  393. int err = -1;
  394. union perf_event *event;
  395. struct thread_map *threads;
  396. struct cpu_map *cpus;
  397. struct perf_evlist *evlist;
  398. struct perf_event_attr attr = {
  399. .type = PERF_TYPE_TRACEPOINT,
  400. .read_format = PERF_FORMAT_ID,
  401. .sample_type = PERF_SAMPLE_ID,
  402. .watermark = 0,
  403. };
  404. cpu_set_t cpu_set;
  405. const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
  406. "getpgid", };
  407. pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
  408. (void*)getpgid };
  409. #define nsyscalls ARRAY_SIZE(syscall_names)
  410. int ids[nsyscalls];
  411. unsigned int nr_events[nsyscalls],
  412. expected_nr_events[nsyscalls], i, j;
  413. struct perf_evsel *evsels[nsyscalls], *evsel;
  414. int sample_size = __perf_evsel__sample_size(attr.sample_type);
  415. for (i = 0; i < nsyscalls; ++i) {
  416. char name[64];
  417. snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
  418. ids[i] = trace_event__id(name);
  419. if (ids[i] < 0) {
  420. pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
  421. return -1;
  422. }
  423. nr_events[i] = 0;
  424. expected_nr_events[i] = random() % 257;
  425. }
  426. threads = thread_map__new(-1, getpid(), UINT_MAX);
  427. if (threads == NULL) {
  428. pr_debug("thread_map__new\n");
  429. return -1;
  430. }
  431. cpus = cpu_map__new(NULL);
  432. if (cpus == NULL) {
  433. pr_debug("cpu_map__new\n");
  434. goto out_free_threads;
  435. }
  436. CPU_ZERO(&cpu_set);
  437. CPU_SET(cpus->map[0], &cpu_set);
  438. sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
  439. if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  440. pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  441. cpus->map[0], strerror(errno));
  442. goto out_free_cpus;
  443. }
  444. evlist = perf_evlist__new(cpus, threads);
  445. if (evlist == NULL) {
  446. pr_debug("perf_evlist__new\n");
  447. goto out_free_cpus;
  448. }
  449. /* anonymous union fields, can't be initialized above */
  450. attr.wakeup_events = 1;
  451. attr.sample_period = 1;
  452. for (i = 0; i < nsyscalls; ++i) {
  453. attr.config = ids[i];
  454. evsels[i] = perf_evsel__new(&attr, i);
  455. if (evsels[i] == NULL) {
  456. pr_debug("perf_evsel__new\n");
  457. goto out_free_evlist;
  458. }
  459. perf_evlist__add(evlist, evsels[i]);
  460. if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
  461. pr_debug("failed to open counter: %s, "
  462. "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  463. strerror(errno));
  464. goto out_close_fd;
  465. }
  466. }
  467. if (perf_evlist__mmap(evlist, 128, true) < 0) {
  468. pr_debug("failed to mmap events: %d (%s)\n", errno,
  469. strerror(errno));
  470. goto out_close_fd;
  471. }
  472. for (i = 0; i < nsyscalls; ++i)
  473. for (j = 0; j < expected_nr_events[i]; ++j) {
  474. int foo = syscalls[i]();
  475. ++foo;
  476. }
  477. while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
  478. struct perf_sample sample;
  479. if (event->header.type != PERF_RECORD_SAMPLE) {
  480. pr_debug("unexpected %s event\n",
  481. perf_event__name(event->header.type));
  482. goto out_munmap;
  483. }
  484. err = perf_event__parse_sample(event, attr.sample_type, sample_size,
  485. false, &sample, false);
  486. if (err) {
  487. pr_err("Can't parse sample, err = %d\n", err);
  488. goto out_munmap;
  489. }
  490. evsel = perf_evlist__id2evsel(evlist, sample.id);
  491. if (evsel == NULL) {
  492. pr_debug("event with id %" PRIu64
  493. " doesn't map to an evsel\n", sample.id);
  494. goto out_munmap;
  495. }
  496. nr_events[evsel->idx]++;
  497. }
  498. list_for_each_entry(evsel, &evlist->entries, node) {
  499. if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
  500. pr_debug("expected %d %s events, got %d\n",
  501. expected_nr_events[evsel->idx],
  502. event_name(evsel), nr_events[evsel->idx]);
  503. goto out_munmap;
  504. }
  505. }
  506. err = 0;
  507. out_munmap:
  508. perf_evlist__munmap(evlist);
  509. out_close_fd:
  510. for (i = 0; i < nsyscalls; ++i)
  511. perf_evsel__close_fd(evsels[i], 1, threads->nr);
  512. out_free_evlist:
  513. perf_evlist__delete(evlist);
  514. out_free_cpus:
  515. cpu_map__delete(cpus);
  516. out_free_threads:
  517. thread_map__delete(threads);
  518. return err;
  519. #undef nsyscalls
  520. }
/*
 * Evaluate @cond; on failure print FILE:LINE plus @text via pr_debug and
 * make the *enclosing* function return -1.  Only usable inside the
 * int-returning test__checkevent_*() helpers below.
 */
#define TEST_ASSERT_VAL(text, cond) \
do { \
	if (!(cond)) { \
		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
		return -1; \
	} \
} while (0)
  528. static int test__checkevent_tracepoint(struct perf_evlist *evlist)
  529. {
  530. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  531. struct perf_evsel, node);
  532. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  533. TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
  534. TEST_ASSERT_VAL("wrong sample_type",
  535. (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
  536. evsel->attr.sample_type);
  537. TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
  538. return 0;
  539. }
  540. static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
  541. {
  542. struct perf_evsel *evsel;
  543. TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
  544. list_for_each_entry(evsel, &evlist->entries, node) {
  545. TEST_ASSERT_VAL("wrong type",
  546. PERF_TYPE_TRACEPOINT == evsel->attr.type);
  547. TEST_ASSERT_VAL("wrong sample_type",
  548. (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
  549. == evsel->attr.sample_type);
  550. TEST_ASSERT_VAL("wrong sample_period",
  551. 1 == evsel->attr.sample_period);
  552. }
  553. return 0;
  554. }
  555. static int test__checkevent_raw(struct perf_evlist *evlist)
  556. {
  557. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  558. struct perf_evsel, node);
  559. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  560. TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
  561. TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
  562. return 0;
  563. }
  564. static int test__checkevent_numeric(struct perf_evlist *evlist)
  565. {
  566. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  567. struct perf_evsel, node);
  568. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  569. TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
  570. TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
  571. return 0;
  572. }
  573. static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
  574. {
  575. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  576. struct perf_evsel, node);
  577. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  578. TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
  579. TEST_ASSERT_VAL("wrong config",
  580. PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
  581. return 0;
  582. }
  583. static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
  584. {
  585. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  586. struct perf_evsel, node);
  587. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  588. TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
  589. TEST_ASSERT_VAL("wrong config",
  590. PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
  591. return 0;
  592. }
  593. static int test__checkevent_genhw(struct perf_evlist *evlist)
  594. {
  595. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  596. struct perf_evsel, node);
  597. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  598. TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
  599. TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
  600. return 0;
  601. }
  602. static int test__checkevent_breakpoint(struct perf_evlist *evlist)
  603. {
  604. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  605. struct perf_evsel, node);
  606. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  607. TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
  608. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  609. TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
  610. evsel->attr.bp_type);
  611. TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
  612. evsel->attr.bp_len);
  613. return 0;
  614. }
  615. static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
  616. {
  617. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  618. struct perf_evsel, node);
  619. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  620. TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
  621. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  622. TEST_ASSERT_VAL("wrong bp_type",
  623. HW_BREAKPOINT_X == evsel->attr.bp_type);
  624. TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
  625. return 0;
  626. }
  627. static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
  628. {
  629. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  630. struct perf_evsel, node);
  631. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  632. TEST_ASSERT_VAL("wrong type",
  633. PERF_TYPE_BREAKPOINT == evsel->attr.type);
  634. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  635. TEST_ASSERT_VAL("wrong bp_type",
  636. HW_BREAKPOINT_R == evsel->attr.bp_type);
  637. TEST_ASSERT_VAL("wrong bp_len",
  638. HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
  639. return 0;
  640. }
  641. static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
  642. {
  643. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  644. struct perf_evsel, node);
  645. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  646. TEST_ASSERT_VAL("wrong type",
  647. PERF_TYPE_BREAKPOINT == evsel->attr.type);
  648. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  649. TEST_ASSERT_VAL("wrong bp_type",
  650. HW_BREAKPOINT_W == evsel->attr.bp_type);
  651. TEST_ASSERT_VAL("wrong bp_len",
  652. HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
  653. return 0;
  654. }
  655. static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
  656. {
  657. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  658. struct perf_evsel, node);
  659. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  660. TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
  661. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  662. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  663. return test__checkevent_tracepoint(evlist);
  664. }
  665. static int
  666. test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
  667. {
  668. struct perf_evsel *evsel;
  669. TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
  670. list_for_each_entry(evsel, &evlist->entries, node) {
  671. TEST_ASSERT_VAL("wrong exclude_user",
  672. !evsel->attr.exclude_user);
  673. TEST_ASSERT_VAL("wrong exclude_kernel",
  674. evsel->attr.exclude_kernel);
  675. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  676. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  677. }
  678. return test__checkevent_tracepoint_multi(evlist);
  679. }
  680. static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
  681. {
  682. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  683. struct perf_evsel, node);
  684. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  685. TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
  686. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  687. TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
  688. return test__checkevent_raw(evlist);
  689. }
  690. static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
  691. {
  692. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  693. struct perf_evsel, node);
  694. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  695. TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
  696. TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
  697. TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
  698. return test__checkevent_numeric(evlist);
  699. }
  700. static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
  701. {
  702. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  703. struct perf_evsel, node);
  704. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  705. TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
  706. TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
  707. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  708. return test__checkevent_symbolic_name(evlist);
  709. }
  710. static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
  711. {
  712. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  713. struct perf_evsel, node);
  714. TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
  715. TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
  716. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  717. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  718. return test__checkevent_symbolic_alias(evlist);
  719. }
  720. static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
  721. {
  722. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  723. struct perf_evsel, node);
  724. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  725. TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
  726. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  727. TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
  728. return test__checkevent_genhw(evlist);
  729. }
/*
 * Table of event specification strings and, for each, the validator that
 * checks the perf_event_attr(s) that parse_events() builds from it.
 * First the plain events, then the same events with :u/:k/:h/:p modifiers.
 */
static struct test__event_st {
	const char *name;	/* event spec string fed to parse_events() */
	__u32 type;		/* unused by test__parse_events(); kept for layout */
	int (*check)(struct perf_evlist *evlist); /* validates the parsed evlist */
} test__events[] = {
	{
		.name = "syscalls:sys_enter_open",
		.check = test__checkevent_tracepoint,
	},
	{
		.name = "syscalls:*",	/* wildcard: multiple tracepoints */
		.check = test__checkevent_tracepoint_multi,
	},
	{
		.name = "r1",		/* raw event */
		.check = test__checkevent_raw,
	},
	{
		.name = "1:1",		/* numeric type:config */
		.check = test__checkevent_numeric,
	},
	{
		.name = "instructions",	/* symbolic hw event */
		.check = test__checkevent_symbolic_name,
	},
	{
		.name = "faults",	/* sw event alias */
		.check = test__checkevent_symbolic_alias,
	},
	{
		.name = "L1-dcache-load-miss",	/* generic hw cache event */
		.check = test__checkevent_genhw,
	},
	{
		.name = "mem:0",	/* breakpoint, default access */
		.check = test__checkevent_breakpoint,
	},
	{
		.name = "mem:0:x",	/* execute breakpoint */
		.check = test__checkevent_breakpoint_x,
	},
	{
		.name = "mem:0:r",	/* read breakpoint */
		.check = test__checkevent_breakpoint_r,
	},
	{
		.name = "mem:0:w",	/* write breakpoint */
		.check = test__checkevent_breakpoint_w,
	},
	/* the same events again, now with privilege/precision modifiers */
	{
		.name = "syscalls:sys_enter_open:k",
		.check = test__checkevent_tracepoint_modifier,
	},
	{
		.name = "syscalls:*:u",
		.check = test__checkevent_tracepoint_multi_modifier,
	},
	{
		.name = "r1:kp",
		.check = test__checkevent_raw_modifier,
	},
	{
		.name = "1:1:hp",
		.check = test__checkevent_numeric_modifier,
	},
	{
		.name = "instructions:h",
		.check = test__checkevent_symbolic_name_modifier,
	},
	{
		.name = "faults:u",
		.check = test__checkevent_symbolic_alias_modifier,
	},
	{
		.name = "L1-dcache-load-miss:kp",
		.check = test__checkevent_genhw_modifier,
	},
};
  808. #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
  809. static int test__parse_events(void)
  810. {
  811. struct perf_evlist *evlist;
  812. u_int i;
  813. int ret = 0;
  814. for (i = 0; i < TEST__EVENTS_CNT; i++) {
  815. struct test__event_st *e = &test__events[i];
  816. evlist = perf_evlist__new(NULL, NULL);
  817. if (evlist == NULL)
  818. break;
  819. ret = parse_events(evlist, e->name, 0);
  820. if (ret) {
  821. pr_debug("failed to parse event '%s', err %d\n",
  822. e->name, ret);
  823. break;
  824. }
  825. ret = e->check(evlist);
  826. if (ret)
  827. break;
  828. perf_evlist__delete(evlist);
  829. }
  830. return ret;
  831. }
  832. static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
  833. size_t *sizep)
  834. {
  835. cpu_set_t *mask;
  836. size_t size;
  837. int i, cpu = -1, nrcpus = 1024;
  838. realloc:
  839. mask = CPU_ALLOC(nrcpus);
  840. size = CPU_ALLOC_SIZE(nrcpus);
  841. CPU_ZERO_S(size, mask);
  842. if (sched_getaffinity(pid, size, mask) == -1) {
  843. CPU_FREE(mask);
  844. if (errno == EINVAL && nrcpus < (1024 << 8)) {
  845. nrcpus = nrcpus << 2;
  846. goto realloc;
  847. }
  848. perror("sched_getaffinity");
  849. return -1;
  850. }
  851. for (i = 0; i < nrcpus; i++) {
  852. if (CPU_ISSET_S(i, size, mask)) {
  853. if (cpu == -1) {
  854. cpu = i;
  855. *maskp = mask;
  856. *sizep = size;
  857. } else
  858. CPU_CLR_S(i, size, mask);
  859. }
  860. }
  861. if (cpu == -1)
  862. CPU_FREE(mask);
  863. return cpu;
  864. }
  865. static int test__PERF_RECORD(void)
  866. {
  867. struct perf_record_opts opts = {
  868. .no_delay = true,
  869. .freq = 10,
  870. .mmap_pages = 256,
  871. };
  872. cpu_set_t *cpu_mask = NULL;
  873. size_t cpu_mask_size = 0;
  874. struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
  875. struct perf_evsel *evsel;
  876. struct perf_sample sample;
  877. const char *cmd = "sleep";
  878. const char *argv[] = { cmd, "1", NULL, };
  879. char *bname;
  880. u64 sample_type, prev_time = 0;
  881. bool found_cmd_mmap = false,
  882. found_libc_mmap = false,
  883. found_vdso_mmap = false,
  884. found_ld_mmap = false;
  885. int err = -1, errs = 0, i, wakeups = 0, sample_size;
  886. u32 cpu;
  887. int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
  888. if (evlist == NULL || argv == NULL) {
  889. pr_debug("Not enough memory to create evlist\n");
  890. goto out;
  891. }
  892. /*
  893. * We need at least one evsel in the evlist, use the default
  894. * one: "cycles".
  895. */
  896. err = perf_evlist__add_default(evlist);
  897. if (err < 0) {
  898. pr_debug("Not enough memory to create evsel\n");
  899. goto out_delete_evlist;
  900. }
  901. /*
  902. * Create maps of threads and cpus to monitor. In this case
  903. * we start with all threads and cpus (-1, -1) but then in
  904. * perf_evlist__prepare_workload we'll fill in the only thread
  905. * we're monitoring, the one forked there.
  906. */
  907. err = perf_evlist__create_maps(evlist, opts.target_pid,
  908. opts.target_tid, UINT_MAX, opts.cpu_list);
  909. if (err < 0) {
  910. pr_debug("Not enough memory to create thread/cpu maps\n");
  911. goto out_delete_evlist;
  912. }
  913. /*
  914. * Prepare the workload in argv[] to run, it'll fork it, and then wait
  915. * for perf_evlist__start_workload() to exec it. This is done this way
  916. * so that we have time to open the evlist (calling sys_perf_event_open
  917. * on all the fds) and then mmap them.
  918. */
  919. err = perf_evlist__prepare_workload(evlist, &opts, argv);
  920. if (err < 0) {
  921. pr_debug("Couldn't run the workload!\n");
  922. goto out_delete_evlist;
  923. }
  924. /*
  925. * Config the evsels, setting attr->comm on the first one, etc.
  926. */
  927. evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
  928. evsel->attr.sample_type |= PERF_SAMPLE_CPU;
  929. evsel->attr.sample_type |= PERF_SAMPLE_TID;
  930. evsel->attr.sample_type |= PERF_SAMPLE_TIME;
  931. perf_evlist__config_attrs(evlist, &opts);
  932. err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
  933. &cpu_mask_size);
  934. if (err < 0) {
  935. pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
  936. goto out_delete_evlist;
  937. }
  938. cpu = err;
  939. /*
  940. * So that we can check perf_sample.cpu on all the samples.
  941. */
  942. if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
  943. pr_debug("sched_setaffinity: %s\n", strerror(errno));
  944. goto out_free_cpu_mask;
  945. }
  946. /*
  947. * Call sys_perf_event_open on all the fds on all the evsels,
  948. * grouping them if asked to.
  949. */
  950. err = perf_evlist__open(evlist, opts.group);
  951. if (err < 0) {
  952. pr_debug("perf_evlist__open: %s\n", strerror(errno));
  953. goto out_delete_evlist;
  954. }
  955. /*
  956. * mmap the first fd on a given CPU and ask for events for the other
  957. * fds in the same CPU to be injected in the same mmap ring buffer
  958. * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
  959. */
  960. err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
  961. if (err < 0) {
  962. pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
  963. goto out_delete_evlist;
  964. }
  965. /*
  966. * We'll need these two to parse the PERF_SAMPLE_* fields in each
  967. * event.
  968. */
  969. sample_type = perf_evlist__sample_type(evlist);
  970. sample_size = __perf_evsel__sample_size(sample_type);
  971. /*
  972. * Now that all is properly set up, enable the events, they will
  973. * count just on workload.pid, which will start...
  974. */
  975. perf_evlist__enable(evlist);
  976. /*
  977. * Now!
  978. */
  979. perf_evlist__start_workload(evlist);
  980. while (1) {
  981. int before = total_events;
  982. for (i = 0; i < evlist->nr_mmaps; i++) {
  983. union perf_event *event;
  984. while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
  985. const u32 type = event->header.type;
  986. const char *name = perf_event__name(type);
  987. ++total_events;
  988. if (type < PERF_RECORD_MAX)
  989. nr_events[type]++;
  990. err = perf_event__parse_sample(event, sample_type,
  991. sample_size, true,
  992. &sample, false);
  993. if (err < 0) {
  994. if (verbose)
  995. perf_event__fprintf(event, stderr);
  996. pr_debug("Couldn't parse sample\n");
  997. goto out_err;
  998. }
  999. if (verbose) {
  1000. pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
  1001. perf_event__fprintf(event, stderr);
  1002. }
  1003. if (prev_time > sample.time) {
  1004. pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
  1005. name, prev_time, sample.time);
  1006. ++errs;
  1007. }
  1008. prev_time = sample.time;
  1009. if (sample.cpu != cpu) {
  1010. pr_debug("%s with unexpected cpu, expected %d, got %d\n",
  1011. name, cpu, sample.cpu);
  1012. ++errs;
  1013. }
  1014. if ((pid_t)sample.pid != evlist->workload.pid) {
  1015. pr_debug("%s with unexpected pid, expected %d, got %d\n",
  1016. name, evlist->workload.pid, sample.pid);
  1017. ++errs;
  1018. }
  1019. if ((pid_t)sample.tid != evlist->workload.pid) {
  1020. pr_debug("%s with unexpected tid, expected %d, got %d\n",
  1021. name, evlist->workload.pid, sample.tid);
  1022. ++errs;
  1023. }
  1024. if ((type == PERF_RECORD_COMM ||
  1025. type == PERF_RECORD_MMAP ||
  1026. type == PERF_RECORD_FORK ||
  1027. type == PERF_RECORD_EXIT) &&
  1028. (pid_t)event->comm.pid != evlist->workload.pid) {
  1029. pr_debug("%s with unexpected pid/tid\n", name);
  1030. ++errs;
  1031. }
  1032. if ((type == PERF_RECORD_COMM ||
  1033. type == PERF_RECORD_MMAP) &&
  1034. event->comm.pid != event->comm.tid) {
  1035. pr_debug("%s with different pid/tid!\n", name);
  1036. ++errs;
  1037. }
  1038. switch (type) {
  1039. case PERF_RECORD_COMM:
  1040. if (strcmp(event->comm.comm, cmd)) {
  1041. pr_debug("%s with unexpected comm!\n", name);
  1042. ++errs;
  1043. }
  1044. break;
  1045. case PERF_RECORD_EXIT:
  1046. goto found_exit;
  1047. case PERF_RECORD_MMAP:
  1048. bname = strrchr(event->mmap.filename, '/');
  1049. if (bname != NULL) {
  1050. if (!found_cmd_mmap)
  1051. found_cmd_mmap = !strcmp(bname + 1, cmd);
  1052. if (!found_libc_mmap)
  1053. found_libc_mmap = !strncmp(bname + 1, "libc", 4);
  1054. if (!found_ld_mmap)
  1055. found_ld_mmap = !strncmp(bname + 1, "ld", 2);
  1056. } else if (!found_vdso_mmap)
  1057. found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
  1058. break;
  1059. case PERF_RECORD_SAMPLE:
  1060. /* Just ignore samples for now */
  1061. break;
  1062. default:
  1063. pr_debug("Unexpected perf_event->header.type %d!\n",
  1064. type);
  1065. ++errs;
  1066. }
  1067. }
  1068. }
  1069. /*
  1070. * We don't use poll here because at least at 3.1 times the
  1071. * PERF_RECORD_{!SAMPLE} events don't honour
  1072. * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
  1073. */
  1074. if (total_events == before && false)
  1075. poll(evlist->pollfd, evlist->nr_fds, -1);
  1076. sleep(1);
  1077. if (++wakeups > 5) {
  1078. pr_debug("No PERF_RECORD_EXIT event!\n");
  1079. break;
  1080. }
  1081. }
  1082. found_exit:
  1083. if (nr_events[PERF_RECORD_COMM] > 1) {
  1084. pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
  1085. ++errs;
  1086. }
  1087. if (nr_events[PERF_RECORD_COMM] == 0) {
  1088. pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
  1089. ++errs;
  1090. }
  1091. if (!found_cmd_mmap) {
  1092. pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
  1093. ++errs;
  1094. }
  1095. if (!found_libc_mmap) {
  1096. pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
  1097. ++errs;
  1098. }
  1099. if (!found_ld_mmap) {
  1100. pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
  1101. ++errs;
  1102. }
  1103. if (!found_vdso_mmap) {
  1104. pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
  1105. ++errs;
  1106. }
  1107. out_err:
  1108. perf_evlist__munmap(evlist);
  1109. out_free_cpu_mask:
  1110. CPU_FREE(cpu_mask);
  1111. out_delete_evlist:
  1112. perf_evlist__delete(evlist);
  1113. out:
  1114. return (err < 0 || errs > 0) ? -1 : 0;
  1115. }
  1116. #if defined(__x86_64__) || defined(__i386__)
  1117. #define barrier() asm volatile("" ::: "memory")
  1118. static u64 rdpmc(unsigned int counter)
  1119. {
  1120. unsigned int low, high;
  1121. asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
  1122. return low | ((u64)high) << 32;
  1123. }
  1124. static u64 rdtsc(void)
  1125. {
  1126. unsigned int low, high;
  1127. asm volatile("rdtsc" : "=a" (low), "=d" (high));
  1128. return low | ((u64)high) << 32;
  1129. }
/*
 * Self-monitoring read of the counter whose perf_event_mmap_page is mapped
 * at @addr, following that page's seqlock protocol: retry the whole read
 * while ->lock changed underneath us.  If the event was time multiplexed
 * (time_enabled != time_running) the raw count is scaled by
 * enabled/running using the page's time_{mult,shift,offset} fields and a
 * local rdtsc().
 */
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		seq = pc->lock;
		barrier();	/* read ->lock before the payload */

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			/* multiplexed: snapshot what's needed for scaling */
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)	/* counter is live, add the hardware value */
			count += rdpmc(idx - 1);

		barrier();	/* read the payload before re-checking ->lock */
	} while (pc->lock != seq);	/* a writer got in: retry */

	if (enabled != running) {
		u64 quot, rem;

		/* cycles -> ns-ish time: offset + (cyc * mult) >> shift */
		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		/* extend both clocks up to "now" */
		enabled += delta;
		if (idx)
			running += delta;

		/* scale: count * enabled / running, avoiding 64-bit overflow */
		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}
/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 * the child exits with a non-zero status, which test__rdpmc() treats as failure.
 */
static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used)
{
	exit(-1);
}
  1174. static int __test__rdpmc(void)
  1175. {
  1176. long page_size = sysconf(_SC_PAGE_SIZE);
  1177. volatile int tmp = 0;
  1178. u64 i, loops = 1000;
  1179. int n;
  1180. int fd;
  1181. void *addr;
  1182. struct perf_event_attr attr = {
  1183. .type = PERF_TYPE_HARDWARE,
  1184. .config = PERF_COUNT_HW_INSTRUCTIONS,
  1185. .exclude_kernel = 1,
  1186. };
  1187. u64 delta_sum = 0;
  1188. struct sigaction sa;
  1189. sigfillset(&sa.sa_mask);
  1190. sa.sa_sigaction = segfault_handler;
  1191. sigaction(SIGSEGV, &sa, NULL);
  1192. fprintf(stderr, "\n\n");
  1193. fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
  1194. if (fd < 0) {
  1195. die("Error: sys_perf_event_open() syscall returned "
  1196. "with %d (%s)\n", fd, strerror(errno));
  1197. }
  1198. addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
  1199. if (addr == (void *)(-1)) {
  1200. die("Error: mmap() syscall returned "
  1201. "with (%s)\n", strerror(errno));
  1202. }
  1203. for (n = 0; n < 6; n++) {
  1204. u64 stamp, now, delta;
  1205. stamp = mmap_read_self(addr);
  1206. for (i = 0; i < loops; i++)
  1207. tmp++;
  1208. now = mmap_read_self(addr);
  1209. loops *= 10;
  1210. delta = now - stamp;
  1211. fprintf(stderr, "%14d: %14Lu\n", n, (long long)delta);
  1212. delta_sum += delta;
  1213. }
  1214. munmap(addr, page_size);
  1215. close(fd);
  1216. fprintf(stderr, " ");
  1217. if (!delta_sum)
  1218. return -1;
  1219. return 0;
  1220. }
  1221. static int test__rdpmc(void)
  1222. {
  1223. int status = 0;
  1224. int wret = 0;
  1225. int ret;
  1226. int pid;
  1227. pid = fork();
  1228. if (pid < 0)
  1229. return -1;
  1230. if (!pid) {
  1231. ret = __test__rdpmc();
  1232. exit(ret);
  1233. }
  1234. wret = waitpid(pid, &status, 0);
  1235. if (wret < 0 || status)
  1236. return -1;
  1237. return 0;
  1238. }
  1239. #endif
/*
 * The perf test suite.  A NULL ->func terminates the table; tests are
 * selected by 1-based position or by a substring of ->desc (see
 * perf_test__matches()).
 */
static struct test {
	const char *desc;	/* human readable test description */
	int (*func)(void);	/* runs the test, returns 0 on success */
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = test__parse_events,
	},
#if defined(__x86_64__) || defined(__i386__)
	/* rdpmc is an x86 instruction, so only build the test there */
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		/* sentinel: NULL ->func marks the end of the table */
		.func = NULL,
	},
};
  1278. static bool perf_test__matches(int curr, int argc, const char *argv[])
  1279. {
  1280. int i;
  1281. if (argc == 0)
  1282. return true;
  1283. for (i = 0; i < argc; ++i) {
  1284. char *end;
  1285. long nr = strtoul(argv[i], &end, 10);
  1286. if (*end == '\0') {
  1287. if (nr == curr + 1)
  1288. return true;
  1289. continue;
  1290. }
  1291. if (strstr(tests[curr].desc, argv[i]))
  1292. return true;
  1293. }
  1294. return false;
  1295. }
  1296. static int __cmd_test(int argc, const char *argv[])
  1297. {
  1298. int i = 0;
  1299. while (tests[i].func) {
  1300. int curr = i++, err;
  1301. if (!perf_test__matches(curr, argc, argv))
  1302. continue;
  1303. pr_info("%2d: %s:", i, tests[curr].desc);
  1304. pr_debug("\n--- start ---\n");
  1305. err = tests[curr].func();
  1306. pr_debug("---- end ----\n%s:", tests[curr].desc);
  1307. pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
  1308. }
  1309. return 0;
  1310. }
  1311. static int perf_test__list(int argc, const char **argv)
  1312. {
  1313. int i = 0;
  1314. while (tests[i].func) {
  1315. int curr = i++;
  1316. if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
  1317. continue;
  1318. pr_info("%2d: %s\n", i, tests[curr].desc);
  1319. }
  1320. return 0;
  1321. }
  1322. int cmd_test(int argc, const char **argv, const char *prefix __used)
  1323. {
  1324. const char * const test_usage[] = {
  1325. "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
  1326. NULL,
  1327. };
  1328. const struct option test_options[] = {
  1329. OPT_INCR('v', "verbose", &verbose,
  1330. "be more verbose (show symbol address, etc)"),
  1331. OPT_END()
  1332. };
  1333. argc = parse_options(argc, argv, test_options, test_usage, 0);
  1334. if (argc >= 1 && !strcmp(argv[0], "list"))
  1335. return perf_test__list(argc, argv);
  1336. symbol_conf.priv_size = sizeof(int);
  1337. symbol_conf.sort_by_name = true;
  1338. symbol_conf.try_vmlinux_path = true;
  1339. if (symbol__init() < 0)
  1340. return -1;
  1341. return __cmd_test(argc, argv);
  1342. }