builtin-test.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610
  1. /*
  2. * builtin-test.c
  3. *
  4. * Builtin regression testing command: ever growing number of sanity tests
  5. */
  6. #include "builtin.h"
  7. #include "util/cache.h"
  8. #include "util/debug.h"
  9. #include "util/debugfs.h"
  10. #include "util/evlist.h"
  11. #include "util/parse-options.h"
  12. #include "util/parse-events.h"
  13. #include "util/symbol.h"
  14. #include "util/thread_map.h"
  15. #include "../../include/linux/hw_breakpoint.h"
  16. #include <sys/mman.h>
  17. static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
  18. {
  19. bool *visited = symbol__priv(sym);
  20. *visited = true;
  21. return 0;
  22. }
/*
 * Cross-check the kernel symbol table parsed from /proc/kallsyms against
 * the one read from a matching vmlinux image found in the vmlinux search
 * path: every function symbol in vmlinux must exist in kallsyms at the
 * same (relocated) start address, with the same name and an end address
 * within one page.  When run with -v the kernel/module maps are also
 * compared in both directions.  Returns 0 on full match, -1 otherwise.
 */
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	/* kallsyms end addresses are synthesized, so allow up to a page of skew */
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation * symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);
	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}
	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}
	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check its names and
	 * end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		/* zero-sized symbols can't be meaningfully compared */
		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				/*
				 * Several symbols may share a start address;
				 * scan the neighbours (backwards first, then
				 * forwards from the first match) looking for
				 * one with the same start.
				 */
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		/* only reached when some mismatch was reported above */
		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;	/* mark as matched for the last pass */
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)	/* never matched against a vmlinux map */
			map__fprintf(pos, stderr);
	}
out:
	return err;
}
  207. #include "util/cpumap.h"
  208. #include "util/evsel.h"
  209. #include <sys/types.h>
  210. static int trace_event__id(const char *evname)
  211. {
  212. char *filename;
  213. int err = -1, fd;
  214. if (asprintf(&filename,
  215. "%s/syscalls/%s/id",
  216. tracing_events_path, evname) < 0)
  217. return -1;
  218. fd = open(filename, O_RDONLY);
  219. if (fd >= 0) {
  220. char id[16];
  221. if (read(fd, id, sizeof(id)) > 0)
  222. err = atoi(id);
  223. close(fd);
  224. }
  225. free(filename);
  226. return err;
  227. }
/*
 * Sanity check the tracepoint counting path: open a counter for the
 * sys_enter_open tracepoint on the current thread only, perform a fixed
 * number of open(2) calls and verify the counter intercepted exactly that
 * many events.  Returns 0 on success, -1 on failure.
 */
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		/* trace_event__id() needs the syscalls events in debugfs */
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	/* monitor just this process (pid -1 means no per-cpu, current pid) */
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	/*
	 * Fire the tracepoint: each open(2) enters the syscall once,
	 * whether or not the open itself succeeds.
	 */
	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
  281. #include <sched.h>
  282. static int test__open_syscall_event_on_all_cpus(void)
  283. {
  284. int err = -1, fd, cpu;
  285. struct thread_map *threads;
  286. struct cpu_map *cpus;
  287. struct perf_evsel *evsel;
  288. struct perf_event_attr attr;
  289. unsigned int nr_open_calls = 111, i;
  290. cpu_set_t cpu_set;
  291. int id = trace_event__id("sys_enter_open");
  292. if (id < 0) {
  293. pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
  294. return -1;
  295. }
  296. threads = thread_map__new(-1, getpid(), UINT_MAX);
  297. if (threads == NULL) {
  298. pr_debug("thread_map__new\n");
  299. return -1;
  300. }
  301. cpus = cpu_map__new(NULL);
  302. if (cpus == NULL) {
  303. pr_debug("cpu_map__new\n");
  304. goto out_thread_map_delete;
  305. }
  306. CPU_ZERO(&cpu_set);
  307. memset(&attr, 0, sizeof(attr));
  308. attr.type = PERF_TYPE_TRACEPOINT;
  309. attr.config = id;
  310. evsel = perf_evsel__new(&attr, 0);
  311. if (evsel == NULL) {
  312. pr_debug("perf_evsel__new\n");
  313. goto out_thread_map_delete;
  314. }
  315. if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
  316. pr_debug("failed to open counter: %s, "
  317. "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  318. strerror(errno));
  319. goto out_evsel_delete;
  320. }
  321. for (cpu = 0; cpu < cpus->nr; ++cpu) {
  322. unsigned int ncalls = nr_open_calls + cpu;
  323. /*
  324. * XXX eventually lift this restriction in a way that
  325. * keeps perf building on older glibc installations
  326. * without CPU_ALLOC. 1024 cpus in 2010 still seems
  327. * a reasonable upper limit tho :-)
  328. */
  329. if (cpus->map[cpu] >= CPU_SETSIZE) {
  330. pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
  331. continue;
  332. }
  333. CPU_SET(cpus->map[cpu], &cpu_set);
  334. if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  335. pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  336. cpus->map[cpu],
  337. strerror(errno));
  338. goto out_close_fd;
  339. }
  340. for (i = 0; i < ncalls; ++i) {
  341. fd = open("/etc/passwd", O_RDONLY);
  342. close(fd);
  343. }
  344. CPU_CLR(cpus->map[cpu], &cpu_set);
  345. }
  346. /*
  347. * Here we need to explicitely preallocate the counts, as if
  348. * we use the auto allocation it will allocate just for 1 cpu,
  349. * as we start by cpu 0.
  350. */
  351. if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
  352. pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
  353. goto out_close_fd;
  354. }
  355. err = 0;
  356. for (cpu = 0; cpu < cpus->nr; ++cpu) {
  357. unsigned int expected;
  358. if (cpus->map[cpu] >= CPU_SETSIZE)
  359. continue;
  360. if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
  361. pr_debug("perf_evsel__read_on_cpu\n");
  362. err = -1;
  363. break;
  364. }
  365. expected = nr_open_calls + cpu;
  366. if (evsel->counts->cpu[cpu].val != expected) {
  367. pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
  368. expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
  369. err = -1;
  370. }
  371. }
  372. out_close_fd:
  373. perf_evsel__close_fd(evsel, 1, threads->nr);
  374. out_evsel_delete:
  375. perf_evsel__delete(evsel);
  376. out_thread_map_delete:
  377. thread_map__delete(threads);
  378. return err;
  379. }
  380. /*
  381. * This test will generate random numbers of calls to some getpid syscalls,
  382. * then establish an mmap for a group of events that are created to monitor
  383. * the syscalls.
  384. *
  385. * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
  386. * sample.id field to map back to its respective perf_evsel instance.
  387. *
  388. * Then it checks if the number of syscalls reported as perf events by
  389. * the kernel corresponds to the number of syscalls made.
  390. */
  391. static int test__basic_mmap(void)
  392. {
  393. int err = -1;
  394. union perf_event *event;
  395. struct thread_map *threads;
  396. struct cpu_map *cpus;
  397. struct perf_evlist *evlist;
  398. struct perf_event_attr attr = {
  399. .type = PERF_TYPE_TRACEPOINT,
  400. .read_format = PERF_FORMAT_ID,
  401. .sample_type = PERF_SAMPLE_ID,
  402. .watermark = 0,
  403. };
  404. cpu_set_t cpu_set;
  405. const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
  406. "getpgid", };
  407. pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
  408. (void*)getpgid };
  409. #define nsyscalls ARRAY_SIZE(syscall_names)
  410. int ids[nsyscalls];
  411. unsigned int nr_events[nsyscalls],
  412. expected_nr_events[nsyscalls], i, j;
  413. struct perf_evsel *evsels[nsyscalls], *evsel;
  414. int sample_size = __perf_evsel__sample_size(attr.sample_type);
  415. for (i = 0; i < nsyscalls; ++i) {
  416. char name[64];
  417. snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
  418. ids[i] = trace_event__id(name);
  419. if (ids[i] < 0) {
  420. pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
  421. return -1;
  422. }
  423. nr_events[i] = 0;
  424. expected_nr_events[i] = random() % 257;
  425. }
  426. threads = thread_map__new(-1, getpid(), UINT_MAX);
  427. if (threads == NULL) {
  428. pr_debug("thread_map__new\n");
  429. return -1;
  430. }
  431. cpus = cpu_map__new(NULL);
  432. if (cpus == NULL) {
  433. pr_debug("cpu_map__new\n");
  434. goto out_free_threads;
  435. }
  436. CPU_ZERO(&cpu_set);
  437. CPU_SET(cpus->map[0], &cpu_set);
  438. sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
  439. if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  440. pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  441. cpus->map[0], strerror(errno));
  442. goto out_free_cpus;
  443. }
  444. evlist = perf_evlist__new(cpus, threads);
  445. if (evlist == NULL) {
  446. pr_debug("perf_evlist__new\n");
  447. goto out_free_cpus;
  448. }
  449. /* anonymous union fields, can't be initialized above */
  450. attr.wakeup_events = 1;
  451. attr.sample_period = 1;
  452. for (i = 0; i < nsyscalls; ++i) {
  453. attr.config = ids[i];
  454. evsels[i] = perf_evsel__new(&attr, i);
  455. if (evsels[i] == NULL) {
  456. pr_debug("perf_evsel__new\n");
  457. goto out_free_evlist;
  458. }
  459. perf_evlist__add(evlist, evsels[i]);
  460. if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
  461. pr_debug("failed to open counter: %s, "
  462. "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  463. strerror(errno));
  464. goto out_close_fd;
  465. }
  466. }
  467. if (perf_evlist__mmap(evlist, 128, true) < 0) {
  468. pr_debug("failed to mmap events: %d (%s)\n", errno,
  469. strerror(errno));
  470. goto out_close_fd;
  471. }
  472. for (i = 0; i < nsyscalls; ++i)
  473. for (j = 0; j < expected_nr_events[i]; ++j) {
  474. int foo = syscalls[i]();
  475. ++foo;
  476. }
  477. while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
  478. struct perf_sample sample;
  479. if (event->header.type != PERF_RECORD_SAMPLE) {
  480. pr_debug("unexpected %s event\n",
  481. perf_event__name(event->header.type));
  482. goto out_munmap;
  483. }
  484. err = perf_event__parse_sample(event, attr.sample_type, sample_size,
  485. false, &sample, false);
  486. if (err) {
  487. pr_err("Can't parse sample, err = %d\n", err);
  488. goto out_munmap;
  489. }
  490. evsel = perf_evlist__id2evsel(evlist, sample.id);
  491. if (evsel == NULL) {
  492. pr_debug("event with id %" PRIu64
  493. " doesn't map to an evsel\n", sample.id);
  494. goto out_munmap;
  495. }
  496. nr_events[evsel->idx]++;
  497. }
  498. list_for_each_entry(evsel, &evlist->entries, node) {
  499. if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
  500. pr_debug("expected %d %s events, got %d\n",
  501. expected_nr_events[evsel->idx],
  502. event_name(evsel), nr_events[evsel->idx]);
  503. goto out_munmap;
  504. }
  505. }
  506. err = 0;
  507. out_munmap:
  508. perf_evlist__munmap(evlist);
  509. out_close_fd:
  510. for (i = 0; i < nsyscalls; ++i)
  511. perf_evsel__close_fd(evsels[i], 1, threads->nr);
  512. out_free_evlist:
  513. perf_evlist__delete(evlist);
  514. out_free_cpus:
  515. cpu_map__delete(cpus);
  516. out_free_threads:
  517. thread_map__delete(threads);
  518. return err;
  519. #undef nsyscalls
  520. }
/*
 * Fail the enclosing check function with a file:line debug message when
 * @cond does not hold.  The do/while(0) wrapper makes the macro behave as
 * a single statement in all contexts.  Note: expands to 'return -1', so
 * it may only be used inside functions returning int.
 */
#define TEST_ASSERT_VAL(text, cond) \
do { \
	if (!(cond)) { \
		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
		return -1; \
	} \
} while (0)
  528. static int test__checkevent_tracepoint(struct perf_evlist *evlist)
  529. {
  530. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  531. struct perf_evsel, node);
  532. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  533. TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
  534. TEST_ASSERT_VAL("wrong sample_type",
  535. (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
  536. evsel->attr.sample_type);
  537. TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
  538. return 0;
  539. }
  540. static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
  541. {
  542. struct perf_evsel *evsel;
  543. TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
  544. list_for_each_entry(evsel, &evlist->entries, node) {
  545. TEST_ASSERT_VAL("wrong type",
  546. PERF_TYPE_TRACEPOINT == evsel->attr.type);
  547. TEST_ASSERT_VAL("wrong sample_type",
  548. (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
  549. == evsel->attr.sample_type);
  550. TEST_ASSERT_VAL("wrong sample_period",
  551. 1 == evsel->attr.sample_period);
  552. }
  553. return 0;
  554. }
  555. static int test__checkevent_raw(struct perf_evlist *evlist)
  556. {
  557. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  558. struct perf_evsel, node);
  559. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  560. TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
  561. TEST_ASSERT_VAL("wrong config", 0x1a == evsel->attr.config);
  562. return 0;
  563. }
  564. static int test__checkevent_numeric(struct perf_evlist *evlist)
  565. {
  566. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  567. struct perf_evsel, node);
  568. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  569. TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
  570. TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
  571. return 0;
  572. }
  573. static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
  574. {
  575. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  576. struct perf_evsel, node);
  577. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  578. TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
  579. TEST_ASSERT_VAL("wrong config",
  580. PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
  581. return 0;
  582. }
  583. static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
  584. {
  585. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  586. struct perf_evsel, node);
  587. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  588. TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
  589. TEST_ASSERT_VAL("wrong config",
  590. PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
  591. TEST_ASSERT_VAL("wrong period",
  592. 100000 == evsel->attr.sample_period);
  593. TEST_ASSERT_VAL("wrong config1",
  594. 0 == evsel->attr.config1);
  595. TEST_ASSERT_VAL("wrong config2",
  596. 1 == evsel->attr.config2);
  597. return 0;
  598. }
  599. static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
  600. {
  601. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  602. struct perf_evsel, node);
  603. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  604. TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
  605. TEST_ASSERT_VAL("wrong config",
  606. PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
  607. return 0;
  608. }
  609. static int test__checkevent_genhw(struct perf_evlist *evlist)
  610. {
  611. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  612. struct perf_evsel, node);
  613. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  614. TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
  615. TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
  616. return 0;
  617. }
  618. static int test__checkevent_breakpoint(struct perf_evlist *evlist)
  619. {
  620. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  621. struct perf_evsel, node);
  622. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  623. TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
  624. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  625. TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
  626. evsel->attr.bp_type);
  627. TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
  628. evsel->attr.bp_len);
  629. return 0;
  630. }
  631. static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
  632. {
  633. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  634. struct perf_evsel, node);
  635. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  636. TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
  637. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  638. TEST_ASSERT_VAL("wrong bp_type",
  639. HW_BREAKPOINT_X == evsel->attr.bp_type);
  640. TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
  641. return 0;
  642. }
  643. static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
  644. {
  645. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  646. struct perf_evsel, node);
  647. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  648. TEST_ASSERT_VAL("wrong type",
  649. PERF_TYPE_BREAKPOINT == evsel->attr.type);
  650. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  651. TEST_ASSERT_VAL("wrong bp_type",
  652. HW_BREAKPOINT_R == evsel->attr.bp_type);
  653. TEST_ASSERT_VAL("wrong bp_len",
  654. HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
  655. return 0;
  656. }
  657. static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
  658. {
  659. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  660. struct perf_evsel, node);
  661. TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
  662. TEST_ASSERT_VAL("wrong type",
  663. PERF_TYPE_BREAKPOINT == evsel->attr.type);
  664. TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
  665. TEST_ASSERT_VAL("wrong bp_type",
  666. HW_BREAKPOINT_W == evsel->attr.bp_type);
  667. TEST_ASSERT_VAL("wrong bp_len",
  668. HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
  669. return 0;
  670. }
  671. static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
  672. {
  673. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  674. struct perf_evsel, node);
  675. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  676. TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
  677. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  678. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  679. return test__checkevent_tracepoint(evlist);
  680. }
  681. static int
  682. test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
  683. {
  684. struct perf_evsel *evsel;
  685. TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
  686. list_for_each_entry(evsel, &evlist->entries, node) {
  687. TEST_ASSERT_VAL("wrong exclude_user",
  688. !evsel->attr.exclude_user);
  689. TEST_ASSERT_VAL("wrong exclude_kernel",
  690. evsel->attr.exclude_kernel);
  691. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  692. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  693. }
  694. return test__checkevent_tracepoint_multi(evlist);
  695. }
  696. static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
  697. {
  698. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  699. struct perf_evsel, node);
  700. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  701. TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
  702. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  703. TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
  704. return test__checkevent_raw(evlist);
  705. }
  706. static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
  707. {
  708. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  709. struct perf_evsel, node);
  710. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  711. TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
  712. TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
  713. TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
  714. return test__checkevent_numeric(evlist);
  715. }
  716. static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
  717. {
  718. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  719. struct perf_evsel, node);
  720. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  721. TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
  722. TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
  723. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  724. return test__checkevent_symbolic_name(evlist);
  725. }
  726. static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
  727. {
  728. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  729. struct perf_evsel, node);
  730. TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
  731. TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
  732. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  733. TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
  734. return test__checkevent_symbolic_alias(evlist);
  735. }
  736. static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
  737. {
  738. struct perf_evsel *evsel = list_entry(evlist->entries.next,
  739. struct perf_evsel, node);
  740. TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
  741. TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
  742. TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
  743. TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
  744. return test__checkevent_genhw(evlist);
  745. }
/*
 * Table of event strings fed to parse_events() together with the checker
 * that validates the resulting evlist.  test__parse_events() walks this
 * table in order.
 *
 * NOTE(review): the 'type' member is never set by any initializer below and
 * is not read anywhere in this chunk — presumably dead; confirm before
 * removing.
 */
static struct test__event_st {
	const char *name;			/* event spec passed to parse_events() */
	__u32 type;
	int (*check)(struct perf_evlist *evlist); /* returns 0 on success */
} test__events[] = {
	{
		.name  = "syscalls:sys_enter_open",
		.check = test__checkevent_tracepoint,
	},
	{
		.name  = "syscalls:*",
		.check = test__checkevent_tracepoint_multi,
	},
	{
		.name  = "r1a",
		.check = test__checkevent_raw,
	},
	{
		.name  = "1:1",
		.check = test__checkevent_numeric,
	},
	{
		.name  = "instructions",
		.check = test__checkevent_symbolic_name,
	},
	{
		.name  = "cycles/period=100000,config2/",
		.check = test__checkevent_symbolic_name_config,
	},
	{
		.name  = "faults",
		.check = test__checkevent_symbolic_alias,
	},
	{
		.name  = "L1-dcache-load-miss",
		.check = test__checkevent_genhw,
	},
	{
		.name  = "mem:0",
		.check = test__checkevent_breakpoint,
	},
	{
		.name  = "mem:0:x",
		.check = test__checkevent_breakpoint_x,
	},
	{
		.name  = "mem:0:r",
		.check = test__checkevent_breakpoint_r,
	},
	{
		.name  = "mem:0:w",
		.check = test__checkevent_breakpoint_w,
	},
	/* The same events again, now with privilege/precision modifiers. */
	{
		.name  = "syscalls:sys_enter_open:k",
		.check = test__checkevent_tracepoint_modifier,
	},
	{
		.name  = "syscalls:*:u",
		.check = test__checkevent_tracepoint_multi_modifier,
	},
	{
		.name  = "r1a:kp",
		.check = test__checkevent_raw_modifier,
	},
	{
		.name  = "1:1:hp",
		.check = test__checkevent_numeric_modifier,
	},
	{
		.name  = "instructions:h",
		.check = test__checkevent_symbolic_name_modifier,
	},
	{
		.name  = "faults:u",
		.check = test__checkevent_symbolic_alias_modifier,
	},
	{
		.name  = "L1-dcache-load-miss:kp",
		.check = test__checkevent_genhw_modifier,
	},
};
  828. #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
  829. static int test__parse_events(void)
  830. {
  831. struct perf_evlist *evlist;
  832. u_int i;
  833. int ret = 0;
  834. for (i = 0; i < TEST__EVENTS_CNT; i++) {
  835. struct test__event_st *e = &test__events[i];
  836. evlist = perf_evlist__new(NULL, NULL);
  837. if (evlist == NULL)
  838. break;
  839. ret = parse_events(evlist, e->name, 0);
  840. if (ret) {
  841. pr_debug("failed to parse event '%s', err %d\n",
  842. e->name, ret);
  843. break;
  844. }
  845. ret = e->check(evlist);
  846. if (ret)
  847. break;
  848. perf_evlist__delete(evlist);
  849. }
  850. return ret;
  851. }
  852. static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
  853. size_t *sizep)
  854. {
  855. cpu_set_t *mask;
  856. size_t size;
  857. int i, cpu = -1, nrcpus = 1024;
  858. realloc:
  859. mask = CPU_ALLOC(nrcpus);
  860. size = CPU_ALLOC_SIZE(nrcpus);
  861. CPU_ZERO_S(size, mask);
  862. if (sched_getaffinity(pid, size, mask) == -1) {
  863. CPU_FREE(mask);
  864. if (errno == EINVAL && nrcpus < (1024 << 8)) {
  865. nrcpus = nrcpus << 2;
  866. goto realloc;
  867. }
  868. perror("sched_getaffinity");
  869. return -1;
  870. }
  871. for (i = 0; i < nrcpus; i++) {
  872. if (CPU_ISSET_S(i, size, mask)) {
  873. if (cpu == -1) {
  874. cpu = i;
  875. *maskp = mask;
  876. *sizep = size;
  877. } else
  878. CPU_CLR_S(i, size, mask);
  879. }
  880. }
  881. if (cpu == -1)
  882. CPU_FREE(mask);
  883. return cpu;
  884. }
  885. static int test__PERF_RECORD(void)
  886. {
  887. struct perf_record_opts opts = {
  888. .no_delay = true,
  889. .freq = 10,
  890. .mmap_pages = 256,
  891. };
  892. cpu_set_t *cpu_mask = NULL;
  893. size_t cpu_mask_size = 0;
  894. struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
  895. struct perf_evsel *evsel;
  896. struct perf_sample sample;
  897. const char *cmd = "sleep";
  898. const char *argv[] = { cmd, "1", NULL, };
  899. char *bname;
  900. u64 sample_type, prev_time = 0;
  901. bool found_cmd_mmap = false,
  902. found_libc_mmap = false,
  903. found_vdso_mmap = false,
  904. found_ld_mmap = false;
  905. int err = -1, errs = 0, i, wakeups = 0, sample_size;
  906. u32 cpu;
  907. int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
  908. if (evlist == NULL || argv == NULL) {
  909. pr_debug("Not enough memory to create evlist\n");
  910. goto out;
  911. }
  912. /*
  913. * We need at least one evsel in the evlist, use the default
  914. * one: "cycles".
  915. */
  916. err = perf_evlist__add_default(evlist);
  917. if (err < 0) {
  918. pr_debug("Not enough memory to create evsel\n");
  919. goto out_delete_evlist;
  920. }
  921. /*
  922. * Create maps of threads and cpus to monitor. In this case
  923. * we start with all threads and cpus (-1, -1) but then in
  924. * perf_evlist__prepare_workload we'll fill in the only thread
  925. * we're monitoring, the one forked there.
  926. */
  927. err = perf_evlist__create_maps(evlist, opts.target_pid,
  928. opts.target_tid, UINT_MAX, opts.cpu_list);
  929. if (err < 0) {
  930. pr_debug("Not enough memory to create thread/cpu maps\n");
  931. goto out_delete_evlist;
  932. }
  933. /*
  934. * Prepare the workload in argv[] to run, it'll fork it, and then wait
  935. * for perf_evlist__start_workload() to exec it. This is done this way
  936. * so that we have time to open the evlist (calling sys_perf_event_open
  937. * on all the fds) and then mmap them.
  938. */
  939. err = perf_evlist__prepare_workload(evlist, &opts, argv);
  940. if (err < 0) {
  941. pr_debug("Couldn't run the workload!\n");
  942. goto out_delete_evlist;
  943. }
  944. /*
  945. * Config the evsels, setting attr->comm on the first one, etc.
  946. */
  947. evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
  948. evsel->attr.sample_type |= PERF_SAMPLE_CPU;
  949. evsel->attr.sample_type |= PERF_SAMPLE_TID;
  950. evsel->attr.sample_type |= PERF_SAMPLE_TIME;
  951. perf_evlist__config_attrs(evlist, &opts);
  952. err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
  953. &cpu_mask_size);
  954. if (err < 0) {
  955. pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
  956. goto out_delete_evlist;
  957. }
  958. cpu = err;
  959. /*
  960. * So that we can check perf_sample.cpu on all the samples.
  961. */
  962. if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
  963. pr_debug("sched_setaffinity: %s\n", strerror(errno));
  964. goto out_free_cpu_mask;
  965. }
  966. /*
  967. * Call sys_perf_event_open on all the fds on all the evsels,
  968. * grouping them if asked to.
  969. */
  970. err = perf_evlist__open(evlist, opts.group);
  971. if (err < 0) {
  972. pr_debug("perf_evlist__open: %s\n", strerror(errno));
  973. goto out_delete_evlist;
  974. }
  975. /*
  976. * mmap the first fd on a given CPU and ask for events for the other
  977. * fds in the same CPU to be injected in the same mmap ring buffer
  978. * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
  979. */
  980. err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
  981. if (err < 0) {
  982. pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
  983. goto out_delete_evlist;
  984. }
  985. /*
  986. * We'll need these two to parse the PERF_SAMPLE_* fields in each
  987. * event.
  988. */
  989. sample_type = perf_evlist__sample_type(evlist);
  990. sample_size = __perf_evsel__sample_size(sample_type);
  991. /*
  992. * Now that all is properly set up, enable the events, they will
  993. * count just on workload.pid, which will start...
  994. */
  995. perf_evlist__enable(evlist);
  996. /*
  997. * Now!
  998. */
  999. perf_evlist__start_workload(evlist);
  1000. while (1) {
  1001. int before = total_events;
  1002. for (i = 0; i < evlist->nr_mmaps; i++) {
  1003. union perf_event *event;
  1004. while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
  1005. const u32 type = event->header.type;
  1006. const char *name = perf_event__name(type);
  1007. ++total_events;
  1008. if (type < PERF_RECORD_MAX)
  1009. nr_events[type]++;
  1010. err = perf_event__parse_sample(event, sample_type,
  1011. sample_size, true,
  1012. &sample, false);
  1013. if (err < 0) {
  1014. if (verbose)
  1015. perf_event__fprintf(event, stderr);
  1016. pr_debug("Couldn't parse sample\n");
  1017. goto out_err;
  1018. }
  1019. if (verbose) {
  1020. pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
  1021. perf_event__fprintf(event, stderr);
  1022. }
  1023. if (prev_time > sample.time) {
  1024. pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
  1025. name, prev_time, sample.time);
  1026. ++errs;
  1027. }
  1028. prev_time = sample.time;
  1029. if (sample.cpu != cpu) {
  1030. pr_debug("%s with unexpected cpu, expected %d, got %d\n",
  1031. name, cpu, sample.cpu);
  1032. ++errs;
  1033. }
  1034. if ((pid_t)sample.pid != evlist->workload.pid) {
  1035. pr_debug("%s with unexpected pid, expected %d, got %d\n",
  1036. name, evlist->workload.pid, sample.pid);
  1037. ++errs;
  1038. }
  1039. if ((pid_t)sample.tid != evlist->workload.pid) {
  1040. pr_debug("%s with unexpected tid, expected %d, got %d\n",
  1041. name, evlist->workload.pid, sample.tid);
  1042. ++errs;
  1043. }
  1044. if ((type == PERF_RECORD_COMM ||
  1045. type == PERF_RECORD_MMAP ||
  1046. type == PERF_RECORD_FORK ||
  1047. type == PERF_RECORD_EXIT) &&
  1048. (pid_t)event->comm.pid != evlist->workload.pid) {
  1049. pr_debug("%s with unexpected pid/tid\n", name);
  1050. ++errs;
  1051. }
  1052. if ((type == PERF_RECORD_COMM ||
  1053. type == PERF_RECORD_MMAP) &&
  1054. event->comm.pid != event->comm.tid) {
  1055. pr_debug("%s with different pid/tid!\n", name);
  1056. ++errs;
  1057. }
  1058. switch (type) {
  1059. case PERF_RECORD_COMM:
  1060. if (strcmp(event->comm.comm, cmd)) {
  1061. pr_debug("%s with unexpected comm!\n", name);
  1062. ++errs;
  1063. }
  1064. break;
  1065. case PERF_RECORD_EXIT:
  1066. goto found_exit;
  1067. case PERF_RECORD_MMAP:
  1068. bname = strrchr(event->mmap.filename, '/');
  1069. if (bname != NULL) {
  1070. if (!found_cmd_mmap)
  1071. found_cmd_mmap = !strcmp(bname + 1, cmd);
  1072. if (!found_libc_mmap)
  1073. found_libc_mmap = !strncmp(bname + 1, "libc", 4);
  1074. if (!found_ld_mmap)
  1075. found_ld_mmap = !strncmp(bname + 1, "ld", 2);
  1076. } else if (!found_vdso_mmap)
  1077. found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
  1078. break;
  1079. case PERF_RECORD_SAMPLE:
  1080. /* Just ignore samples for now */
  1081. break;
  1082. default:
  1083. pr_debug("Unexpected perf_event->header.type %d!\n",
  1084. type);
  1085. ++errs;
  1086. }
  1087. }
  1088. }
  1089. /*
  1090. * We don't use poll here because at least at 3.1 times the
  1091. * PERF_RECORD_{!SAMPLE} events don't honour
  1092. * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
  1093. */
  1094. if (total_events == before && false)
  1095. poll(evlist->pollfd, evlist->nr_fds, -1);
  1096. sleep(1);
  1097. if (++wakeups > 5) {
  1098. pr_debug("No PERF_RECORD_EXIT event!\n");
  1099. break;
  1100. }
  1101. }
  1102. found_exit:
  1103. if (nr_events[PERF_RECORD_COMM] > 1) {
  1104. pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
  1105. ++errs;
  1106. }
  1107. if (nr_events[PERF_RECORD_COMM] == 0) {
  1108. pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
  1109. ++errs;
  1110. }
  1111. if (!found_cmd_mmap) {
  1112. pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
  1113. ++errs;
  1114. }
  1115. if (!found_libc_mmap) {
  1116. pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
  1117. ++errs;
  1118. }
  1119. if (!found_ld_mmap) {
  1120. pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
  1121. ++errs;
  1122. }
  1123. if (!found_vdso_mmap) {
  1124. pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
  1125. ++errs;
  1126. }
  1127. out_err:
  1128. perf_evlist__munmap(evlist);
  1129. out_free_cpu_mask:
  1130. CPU_FREE(cpu_mask);
  1131. out_delete_evlist:
  1132. perf_evlist__delete(evlist);
  1133. out:
  1134. return (err < 0 || errs > 0) ? -1 : 0;
  1135. }
  1136. #if defined(__x86_64__) || defined(__i386__)
/*
 * Compiler barrier only: the empty asm with a "memory" clobber stops the
 * compiler from reordering memory accesses across it; no CPU fence is
 * emitted.  Used around the seqlock-style reads in mmap_read_self().
 */
#define barrier() asm volatile("" ::: "memory")
/*
 * Read hardware performance counter @counter directly from user space via
 * the x86 RDPMC instruction (EDX:EAX, counter selected by ECX).
 * Faults with SIGSEGV if the kernel has not enabled user-space RDPMC,
 * which is what segfault_handler() catches.
 */
static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	/* Combine EDX:EAX into a single 64-bit value. */
	return low | ((u64)high) << 32;
}
/*
 * Read the x86 time-stamp counter (RDTSC, result in EDX:EAX).
 */
static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	/* Combine EDX:EAX into a single 64-bit value. */
	return low | ((u64)high) << 32;
}
/*
 * Read the current value of a self-monitored counter whose
 * perf_event_mmap_page is mapped at @addr, without entering the kernel.
 *
 * The mmap page is sampled under a seqlock-style retry loop: pc->lock is
 * read before and after the field reads, and the whole snapshot is retried
 * if it changed, so {enabled, running, index, offset} are consistent.
 * barrier() only prevents compiler reordering here.
 *
 * When the event was time-multiplexed (enabled != running) the raw count
 * is scaled by enabled/running, estimating elapsed time from the TSC via
 * the time_mult/time_shift/time_offset fields the kernel exports.
 */
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		/* Time-scaling data only needed if multiplexed. */
		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		/* pc->index is hw counter number + 1; 0 = not active. */
		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	if (enabled != running) {
		u64 quot, rem;

		/* delta = time_offset + (cyc * time_mult) >> time_shift,
		 * computed in two parts to avoid overflowing 64 bits. */
		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		/* NOTE(review): running is only advanced when idx != 0 —
		 * presumably because it only ticks while the event is
		 * scheduled on a counter; confirm against the kernel's
		 * mmap-page self-monitoring example. */
		if (idx)
			running += delta;

		/* Scale count by enabled/running without 128-bit math:
		 * count = quot*enabled + rem*enabled/running. */
		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}
/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 * installed as the SIGSEGV handler in __test__rdpmc(), which runs in a
 * forked child — the non-zero exit status is what test__rdpmc() checks
 * via waitpid().
 */
static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used)
{
	exit(-1);
}
  1194. static int __test__rdpmc(void)
  1195. {
  1196. long page_size = sysconf(_SC_PAGE_SIZE);
  1197. volatile int tmp = 0;
  1198. u64 i, loops = 1000;
  1199. int n;
  1200. int fd;
  1201. void *addr;
  1202. struct perf_event_attr attr = {
  1203. .type = PERF_TYPE_HARDWARE,
  1204. .config = PERF_COUNT_HW_INSTRUCTIONS,
  1205. .exclude_kernel = 1,
  1206. };
  1207. u64 delta_sum = 0;
  1208. struct sigaction sa;
  1209. sigfillset(&sa.sa_mask);
  1210. sa.sa_sigaction = segfault_handler;
  1211. sigaction(SIGSEGV, &sa, NULL);
  1212. fprintf(stderr, "\n\n");
  1213. fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
  1214. if (fd < 0) {
  1215. die("Error: sys_perf_event_open() syscall returned "
  1216. "with %d (%s)\n", fd, strerror(errno));
  1217. }
  1218. addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
  1219. if (addr == (void *)(-1)) {
  1220. die("Error: mmap() syscall returned "
  1221. "with (%s)\n", strerror(errno));
  1222. }
  1223. for (n = 0; n < 6; n++) {
  1224. u64 stamp, now, delta;
  1225. stamp = mmap_read_self(addr);
  1226. for (i = 0; i < loops; i++)
  1227. tmp++;
  1228. now = mmap_read_self(addr);
  1229. loops *= 10;
  1230. delta = now - stamp;
  1231. fprintf(stderr, "%14d: %14Lu\n", n, (long long)delta);
  1232. delta_sum += delta;
  1233. }
  1234. munmap(addr, page_size);
  1235. close(fd);
  1236. fprintf(stderr, " ");
  1237. if (!delta_sum)
  1238. return -1;
  1239. return 0;
  1240. }
  1241. static int test__rdpmc(void)
  1242. {
  1243. int status = 0;
  1244. int wret = 0;
  1245. int ret;
  1246. int pid;
  1247. pid = fork();
  1248. if (pid < 0)
  1249. return -1;
  1250. if (!pid) {
  1251. ret = __test__rdpmc();
  1252. exit(ret);
  1253. }
  1254. wret = waitpid(pid, &status, 0);
  1255. if (wret < 0 || status)
  1256. return -1;
  1257. return 0;
  1258. }
  1259. #endif
/*
 * Registry of the built-in tests, run in order by __cmd_test() and listed
 * by perf_test__list().  The table order defines the test numbers users
 * pass on the command line, so entries must not be reordered casually.
 * Terminated by an entry with a NULL ->func.
 */
static struct test {
	const char *desc;	/* human-readable name, matched against argv */
	int (*func)(void);	/* returns 0 on success, non-zero on failure */
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = test__parse_events,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.func = NULL,	/* terminator */
	},
};
  1298. static bool perf_test__matches(int curr, int argc, const char *argv[])
  1299. {
  1300. int i;
  1301. if (argc == 0)
  1302. return true;
  1303. for (i = 0; i < argc; ++i) {
  1304. char *end;
  1305. long nr = strtoul(argv[i], &end, 10);
  1306. if (*end == '\0') {
  1307. if (nr == curr + 1)
  1308. return true;
  1309. continue;
  1310. }
  1311. if (strstr(tests[curr].desc, argv[i]))
  1312. return true;
  1313. }
  1314. return false;
  1315. }
  1316. static int __cmd_test(int argc, const char *argv[])
  1317. {
  1318. int i = 0;
  1319. while (tests[i].func) {
  1320. int curr = i++, err;
  1321. if (!perf_test__matches(curr, argc, argv))
  1322. continue;
  1323. pr_info("%2d: %s:", i, tests[curr].desc);
  1324. pr_debug("\n--- start ---\n");
  1325. err = tests[curr].func();
  1326. pr_debug("---- end ----\n%s:", tests[curr].desc);
  1327. pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
  1328. }
  1329. return 0;
  1330. }
  1331. static int perf_test__list(int argc, const char **argv)
  1332. {
  1333. int i = 0;
  1334. while (tests[i].func) {
  1335. int curr = i++;
  1336. if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
  1337. continue;
  1338. pr_info("%2d: %s\n", i, tests[curr].desc);
  1339. }
  1340. return 0;
  1341. }
  1342. int cmd_test(int argc, const char **argv, const char *prefix __used)
  1343. {
  1344. const char * const test_usage[] = {
  1345. "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
  1346. NULL,
  1347. };
  1348. const struct option test_options[] = {
  1349. OPT_INCR('v', "verbose", &verbose,
  1350. "be more verbose (show symbol address, etc)"),
  1351. OPT_END()
  1352. };
  1353. argc = parse_options(argc, argv, test_options, test_usage, 0);
  1354. if (argc >= 1 && !strcmp(argv[0], "list"))
  1355. return perf_test__list(argc, argv);
  1356. symbol_conf.priv_size = sizeof(int);
  1357. symbol_conf.sort_by_name = true;
  1358. symbol_conf.try_vmlinux_path = true;
  1359. if (symbol__init() < 0)
  1360. return -1;
  1361. return __cmd_test(argc, argv);
  1362. }