/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
#include "event-parse.h"
#include "../../include/linux/hw_breakpoint.h"
#include <sys/mman.h>
static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}
	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;

	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check its names and
	 * end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases).
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
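
/*
 * Read the tracepoint id for a "syscalls:" event from
 * <tracing_events_path>/syscalls/<evname>/id, returning it as an int or -1
 * if debugfs isn't mounted or the event doesn't exist.
 */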
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
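
/*
 * Open a single syscalls:sys_enter_open counter on the current thread, do a
 * known number of open(2) calls and check that the count read back via
 * perf_evsel__read_on_cpu() matches that number.
 */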
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>
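
/*
 * Same as test__open_syscall_event, but pin the thread to each CPU in the
 * cpu map in turn, doing a different number of open(2) calls per CPU, and
 * verify the per-cpu counts afterwards.
 */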
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;

		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}
	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events via mmap and use the PERF_SAMPLE_ID generated
 * sample.id field to map each event back to its respective perf_evsel
 * instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_TRACEPOINT,
		.read_format = PERF_FORMAT_ID,
		.sample_type = PERF_SAMPLE_ID,
		.watermark = 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void*)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}
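
/*
 * Find the first CPU in pid's affinity mask and shrink the mask in *maskp
 * down to just that CPU, so the caller can pin the task to it.
 */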
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}
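
/*
 * Exercise the perf record machinery in-process: fork a "sleep 1" workload,
 * pin it to one CPU, open + mmap the events and then check that the
 * PERF_RECORD_{COMM,MMAP,FORK,EXIT,SAMPLE} stream read back is consistent
 * (pid/tid/cpu/time fields, the expected mmaps, an exit event, etc).
 */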
static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay = true,
		.freq = 10,
		.mmap_pages = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;
				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}
		/*
		 * We don't use poll here because at least as of 3.1 the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);

		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}
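
/*
 * Read the counter for the calling thread directly from user space: take a
 * consistent snapshot of the mmapped perf_event_mmap_page (retrying while
 * pc->lock changes), read the PMC with rdpmc and, if the event was not
 * scheduled in the whole time, scale the count by enabled/running time.
 */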
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	if (enabled != running) {
		u64 quot, rem;

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}

/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)
{
	exit(-1);
}
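
/*
 * Open an instructions counter on the current thread, mmap its user page and
 * read it via rdpmc from user space around busy loops of increasing length.
 * The deltas must add up to something non-zero for the test to pass; a
 * SIGSEGV (rdpmc disallowed) makes the child exit(-1).
 */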
static int __test__rdpmc(void)
{
	volatile int tmp = 0;
	u64 i, loops = 1000;
	int n;
	int fd;
	void *addr;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel = 1,
	};
	u64 delta_sum = 0;
	struct sigaction sa;

	sigfillset(&sa.sa_mask);
	sa.sa_sigaction = segfault_handler;
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
		return -1;
	}

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		pr_err("Error: mmap() syscall returned with (%s)\n",
		       strerror(errno));
		goto out_close;
	}

	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)
			tmp++;

		now = mmap_read_self(addr);
		loops *= 10;

		delta = now - stamp;
		pr_debug("%14d: %14Lu\n", n, (long long)delta);

		delta_sum += delta;
	}

	munmap(addr, page_size);
	pr_debug(" ");
out_close:
	close(fd);

	if (!delta_sum)
		return -1;

	return 0;
}
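
/*
 * Run __test__rdpmc() in a forked child so a faulting rdpmc only kills the
 * child; the parent just checks the exit status.
 */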
static int test__rdpmc(void)
{
	int status = 0;
	int wret = 0;
	int ret;
	int pid;

	pid = fork();
	if (pid < 0)
		return -1;

	if (!pid) {
		ret = __test__rdpmc();

		exit(ret);
	}

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)
		return -1;

	return 0;
}
#endif

static int test__perf_pmu(void)
{
	return perf_pmu__test();
}
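
/*
 * Synthesize every valid hw cache event name (type:op:result), feed them all
 * to parse_events() and then check that the resulting evsels report the same
 * names back via perf_evsel__name().
 */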
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				err = parse_events(evlist, name, 0);
				if (err)
					ret = err;
			}
		}
	}

	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}
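
/*
 * Parse each event name in names[] and check that perf_evsel__name() on the
 * resulting evsels gives the same strings back; returns 0 on success and a
 * negative value otherwise.
 */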
static int __perf_evsel__name_array_test(const char *names[], int nr_names)
{
	int i, err;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_names; ++i) {
		err = parse_events(evlist, names[i], 0);
		if (err) {
			pr_debug("failed to parse event '%s', err %d\n",
				 names[i], err);
			goto out_delete_evlist;
		}
	}

	err = 0;
	list_for_each_entry(evsel, &evlist->entries, node) {
		if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
			--err;
			pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))

static int perf_evsel__roundtrip_name_test(void)
{
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	if (err)
		ret = err;

	err = perf_evsel__roundtrip_cache_name_test();
	if (err)
		ret = err;

	return ret;
}
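
/*
 * Check that the named tracepoint field exists in the evsel's format and
 * that its size and signedness match what the test expects.
 */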
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
				  int size, bool should_be_signed)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int is_signed;
	int ret = 0;

	if (field == NULL) {
		pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
		return -1;
	}

	is_signed = !!(field->flags & FIELD_IS_SIGNED);
	if (should_be_signed && !is_signed) {
		pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
			 evsel->name, name, is_signed, should_be_signed);
		ret = -1;
	}

	if (field->size != size) {
		pr_debug("%s: \"%s\" size (%d) should be %d!\n",
			 evsel->name, name, field->size, size);
		ret = -1;
	}

	return ret;
}
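
/*
 * Build evsels for the sched:sched_switch and sched:sched_wakeup tracepoints
 * and verify the size/signedness of the fields the perf tools rely on.
 */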
static int perf_evsel__tp_sched_test(void)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
	int ret = 0;

	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_state", 8, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_prio", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "success", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

	return ret;
}
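
/*
 * Generate one syscalls:sys_enter_open event with known open() flags, read
 * it back through the mmap ring and check that perf_evsel__intval() extracts
 * the same "flags" value from the raw tracepoint data.
 */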
static int test__syscall_open_tp_fields(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay = true,
		.freq = 1,
		.mmap_pages = 256,
		.raw_samples = true,
	};
	const char *filename = "/etc/passwd";
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);
		goto out;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL) {
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;
	}

	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;
	}

	perf_evsel__config(evsel, &opts, evsel);

	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);

	while (1) {
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				int tp_flags;
				struct perf_sample sample;

				++nr_events;

				if (type != PERF_RECORD_SAMPLE)
					continue;

				err = perf_evsel__parse_sample(evsel, event, &sample);
				if (err) {
					pr_err("Can't parse sample, err = %d\n", err);
					goto out_munmap;
				}

				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);
					goto out_munmap;
				}

				goto out_ok;
			}
		}

		if (nr_events == before)
			poll(evlist->pollfd, evlist->nr_fds, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);
			goto out_munmap;
		}
	}
out_ok:
	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = perf_evsel__roundtrip_name_test,
	},
	{
		.desc = "Check parsing of sched tracepoints fields",
		.func = perf_evsel__tp_sched_test,
	},
	{
		.desc = "Generate and check syscalls:sys_enter_open event fields",
		.func = test__syscall_open_tp_fields,
	},
	{
		.desc = "struct perf_event_attr setup",
		.func = test_attr__run,
	},
	{
		.func = NULL,
	},
};
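
/*
 * A test matches if no arguments were given, if one of the arguments is its
 * 1-based index, or if one of the arguments is a substring of its description.
 */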
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}
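
/*
 * Run every test that matches the command line arguments, printing an
 * "Ok"/"FAILED!" line per test, with the descriptions padded to the widest
 * one.
 */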
static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;
	int width = 0;

	while (tests[i].func) {
		int len = strlen(tests[i].desc);

		if (width < len)
			width = len;
		++i;
	}

	i = 0;
	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %-*s:", i, width, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		if (err)
			color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		else
			pr_info(" Ok\n");
	}

	return 0;
}
static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const test_usage[] = {
		"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
		NULL,
	};
	const struct option test_options[] = {
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	return __cmd_test(argc, argv);
}