evlist.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013
  1. /*
  2. * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  3. *
  4. * Parts came from builtin-{top,stat,record}.c, see those files for further
  5. * copyright notes.
  6. *
  7. * Released under the GPL v2. (and only v2, not any later version)
  8. */
  9. #include "util.h"
  10. #include <lk/debugfs.h>
  11. #include <poll.h>
  12. #include "cpumap.h"
  13. #include "thread_map.h"
  14. #include "target.h"
  15. #include "evlist.h"
  16. #include "evsel.h"
  17. #include "debug.h"
  18. #include <unistd.h>
  19. #include "parse-events.h"
  20. #include <sys/mman.h>
  21. #include <linux/bitops.h>
  22. #include <linux/hash.h>
/* File descriptor of evsel e for cpu x, thread y */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
/* Per-(cpu, thread) struct perf_sample_id slot of evsel e */
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
  25. void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
  26. struct thread_map *threads)
  27. {
  28. int i;
  29. for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
  30. INIT_HLIST_HEAD(&evlist->heads[i]);
  31. INIT_LIST_HEAD(&evlist->entries);
  32. perf_evlist__set_maps(evlist, cpus, threads);
  33. evlist->workload.pid = -1;
  34. }
  35. struct perf_evlist *perf_evlist__new(void)
  36. {
  37. struct perf_evlist *evlist = zalloc(sizeof(*evlist));
  38. if (evlist != NULL)
  39. perf_evlist__init(evlist, NULL, NULL);
  40. return evlist;
  41. }
  42. /**
  43. * perf_evlist__set_id_pos - set the positions of event ids.
  44. * @evlist: selected event list
  45. *
  46. * Events with compatible sample types all have the same id_pos
  47. * and is_pos. For convenience, put a copy on evlist.
  48. */
  49. void perf_evlist__set_id_pos(struct perf_evlist *evlist)
  50. {
  51. struct perf_evsel *first = perf_evlist__first(evlist);
  52. evlist->id_pos = first->id_pos;
  53. evlist->is_pos = first->is_pos;
  54. }
  55. static void perf_evlist__purge(struct perf_evlist *evlist)
  56. {
  57. struct perf_evsel *pos, *n;
  58. list_for_each_entry_safe(pos, n, &evlist->entries, node) {
  59. list_del_init(&pos->node);
  60. perf_evsel__delete(pos);
  61. }
  62. evlist->nr_entries = 0;
  63. }
  64. void perf_evlist__exit(struct perf_evlist *evlist)
  65. {
  66. free(evlist->mmap);
  67. free(evlist->pollfd);
  68. evlist->mmap = NULL;
  69. evlist->pollfd = NULL;
  70. }
  71. void perf_evlist__delete(struct perf_evlist *evlist)
  72. {
  73. perf_evlist__purge(evlist);
  74. perf_evlist__exit(evlist);
  75. free(evlist);
  76. }
  77. void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
  78. {
  79. list_add_tail(&entry->node, &evlist->entries);
  80. if (!evlist->nr_entries++)
  81. perf_evlist__set_id_pos(evlist);
  82. }
  83. void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
  84. struct list_head *list,
  85. int nr_entries)
  86. {
  87. bool set_id_pos = !evlist->nr_entries;
  88. list_splice_tail(list, &evlist->entries);
  89. evlist->nr_entries += nr_entries;
  90. if (set_id_pos)
  91. perf_evlist__set_id_pos(evlist);
  92. }
  93. void __perf_evlist__set_leader(struct list_head *list)
  94. {
  95. struct perf_evsel *evsel, *leader;
  96. leader = list_entry(list->next, struct perf_evsel, node);
  97. evsel = list_entry(list->prev, struct perf_evsel, node);
  98. leader->nr_members = evsel->idx - leader->idx + 1;
  99. list_for_each_entry(evsel, list, node) {
  100. evsel->leader = leader;
  101. }
  102. }
  103. void perf_evlist__set_leader(struct perf_evlist *evlist)
  104. {
  105. if (evlist->nr_entries) {
  106. evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
  107. __perf_evlist__set_leader(&evlist->entries);
  108. }
  109. }
  110. int perf_evlist__add_default(struct perf_evlist *evlist)
  111. {
  112. struct perf_event_attr attr = {
  113. .type = PERF_TYPE_HARDWARE,
  114. .config = PERF_COUNT_HW_CPU_CYCLES,
  115. };
  116. struct perf_evsel *evsel;
  117. event_attr_init(&attr);
  118. evsel = perf_evsel__new(&attr, 0);
  119. if (evsel == NULL)
  120. goto error;
  121. /* use strdup() because free(evsel) assumes name is allocated */
  122. evsel->name = strdup("cycles");
  123. if (!evsel->name)
  124. goto error_free;
  125. perf_evlist__add(evlist, evsel);
  126. return 0;
  127. error_free:
  128. perf_evsel__delete(evsel);
  129. error:
  130. return -ENOMEM;
  131. }
  132. static int perf_evlist__add_attrs(struct perf_evlist *evlist,
  133. struct perf_event_attr *attrs, size_t nr_attrs)
  134. {
  135. struct perf_evsel *evsel, *n;
  136. LIST_HEAD(head);
  137. size_t i;
  138. for (i = 0; i < nr_attrs; i++) {
  139. evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
  140. if (evsel == NULL)
  141. goto out_delete_partial_list;
  142. list_add_tail(&evsel->node, &head);
  143. }
  144. perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
  145. return 0;
  146. out_delete_partial_list:
  147. list_for_each_entry_safe(evsel, n, &head, node)
  148. perf_evsel__delete(evsel);
  149. return -1;
  150. }
  151. int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
  152. struct perf_event_attr *attrs, size_t nr_attrs)
  153. {
  154. size_t i;
  155. for (i = 0; i < nr_attrs; i++)
  156. event_attr_init(attrs + i);
  157. return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
  158. }
  159. struct perf_evsel *
  160. perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
  161. {
  162. struct perf_evsel *evsel;
  163. list_for_each_entry(evsel, &evlist->entries, node) {
  164. if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
  165. (int)evsel->attr.config == id)
  166. return evsel;
  167. }
  168. return NULL;
  169. }
  170. struct perf_evsel *
  171. perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
  172. const char *name)
  173. {
  174. struct perf_evsel *evsel;
  175. list_for_each_entry(evsel, &evlist->entries, node) {
  176. if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
  177. (strcmp(evsel->name, name) == 0))
  178. return evsel;
  179. }
  180. return NULL;
  181. }
  182. int perf_evlist__add_newtp(struct perf_evlist *evlist,
  183. const char *sys, const char *name, void *handler)
  184. {
  185. struct perf_evsel *evsel;
  186. evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
  187. if (evsel == NULL)
  188. return -1;
  189. evsel->handler.func = handler;
  190. perf_evlist__add(evlist, evsel);
  191. return 0;
  192. }
/*
 * Stop counting on every event in the list.
 *
 * Only group leaders get the ioctl; NOTE(review): this assumes group
 * members stop counting along with their leader — confirm against the
 * PERF_EVENT_IOC_DISABLE semantics for the targeted kernels.
 */
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
/*
 * (Re)start counting on every event in the list.
 *
 * Mirror image of perf_evlist__disable(): only group leaders get the
 * PERF_EVENT_IOC_ENABLE ioctl, per fd on each (cpu, thread).
 */
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
  225. static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
  226. {
  227. int nr_cpus = cpu_map__nr(evlist->cpus);
  228. int nr_threads = thread_map__nr(evlist->threads);
  229. int nfds = nr_cpus * nr_threads * evlist->nr_entries;
  230. evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
  231. return evlist->pollfd != NULL ? 0 : -ENOMEM;
  232. }
  233. void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
  234. {
  235. fcntl(fd, F_SETFL, O_NONBLOCK);
  236. evlist->pollfd[evlist->nr_fds].fd = fd;
  237. evlist->pollfd[evlist->nr_fds].events = POLLIN;
  238. evlist->nr_fds++;
  239. }
  240. static void perf_evlist__id_hash(struct perf_evlist *evlist,
  241. struct perf_evsel *evsel,
  242. int cpu, int thread, u64 id)
  243. {
  244. int hash;
  245. struct perf_sample_id *sid = SID(evsel, cpu, thread);
  246. sid->id = id;
  247. sid->evsel = evsel;
  248. hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
  249. hlist_add_head(&sid->node, &evlist->heads[hash]);
  250. }
  251. void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
  252. int cpu, int thread, u64 id)
  253. {
  254. perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
  255. evsel->id[evsel->ids++] = id;
  256. }
/*
 * Determine the kernel-assigned event id for @fd and register it for
 * (cpu, thread) of @evsel via perf_evlist__id_add().
 *
 * Tries the PERF_EVENT_IOC_ID ioctl first; on kernels lacking it
 * (ENOTTY) falls back to read()ing the counter and extracting the id
 * from its position in the read_format layout.
 *
 * Returns 0 on success, -1 when the id cannot be determined.
 */
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	/* any failure other than "ioctl not supported" is fatal */
	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	/* TOTAL_TIME_* fields, when enabled, precede the id in the buffer */
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
  289. struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
  290. {
  291. struct hlist_head *head;
  292. struct perf_sample_id *sid;
  293. int hash;
  294. hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
  295. head = &evlist->heads[hash];
  296. hlist_for_each_entry(sid, head, node)
  297. if (sid->id == id)
  298. return sid;
  299. return NULL;
  300. }
  301. struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
  302. {
  303. struct perf_sample_id *sid;
  304. if (evlist->nr_entries == 1)
  305. return perf_evlist__first(evlist);
  306. sid = perf_evlist__id2sid(evlist, id);
  307. if (sid)
  308. return sid->evsel;
  309. if (!perf_evlist__sample_id_all(evlist))
  310. return perf_evlist__first(evlist);
  311. return NULL;
  312. }
/*
 * Extract the sample id from a raw event record.
 *
 * For PERF_RECORD_SAMPLE the id sits id_pos u64 words from the start of
 * the payload; for every other record type it sits is_pos words from
 * the end (the sample_id_all trailer).  Stores the id in *id and
 * returns 0, or -1 when the record is too short to contain one.
 */
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	/* number of u64 words in the payload after the header */
	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
  331. static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
  332. union perf_event *event)
  333. {
  334. struct hlist_head *head;
  335. struct perf_sample_id *sid;
  336. int hash;
  337. u64 id;
  338. if (evlist->nr_entries == 1)
  339. return perf_evlist__first(evlist);
  340. if (perf_evlist__event2id(evlist, event, &id))
  341. return NULL;
  342. /* Synthesized events have an id of zero */
  343. if (!id)
  344. return perf_evlist__first(evlist);
  345. hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
  346. head = &evlist->heads[hash];
  347. hlist_for_each_entry(sid, head, node) {
  348. if (sid->id == id)
  349. return sid->evsel;
  350. }
  351. return NULL;
  352. }
/*
 * Fetch the next event from ring buffer @idx, or NULL when it is empty.
 *
 * In overwrite mode the kernel may wrap and clobber unread data, so a
 * reader that fell too far behind restarts at the current head.  An
 * event that wraps past the end of the buffer is reassembled into
 * md->event_copy so the caller always sees a contiguous record.
 */
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size; /* data follows the control page */
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			/* copy the wrapped record piecewise into event_copy */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	/* in non-overwrite mode tell the kernel how far we have consumed */
	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
  406. static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
  407. {
  408. if (evlist->mmap[idx].base != NULL) {
  409. munmap(evlist->mmap[idx].base, evlist->mmap_len);
  410. evlist->mmap[idx].base = NULL;
  411. }
  412. }
  413. void perf_evlist__munmap(struct perf_evlist *evlist)
  414. {
  415. int i;
  416. for (i = 0; i < evlist->nr_mmaps; i++)
  417. __perf_evlist__munmap(evlist, i);
  418. free(evlist->mmap);
  419. evlist->mmap = NULL;
  420. }
  421. static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
  422. {
  423. evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
  424. if (cpu_map__empty(evlist->cpus))
  425. evlist->nr_mmaps = thread_map__nr(evlist->threads);
  426. evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
  427. return evlist->mmap != NULL ? 0 : -ENOMEM;
  428. }
  429. static int __perf_evlist__mmap(struct perf_evlist *evlist,
  430. int idx, int prot, int mask, int fd)
  431. {
  432. evlist->mmap[idx].prev = 0;
  433. evlist->mmap[idx].mask = mask;
  434. evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
  435. MAP_SHARED, fd, 0);
  436. if (evlist->mmap[idx].base == MAP_FAILED) {
  437. evlist->mmap[idx].base = NULL;
  438. return -1;
  439. }
  440. perf_evlist__add_pollfd(evlist, fd);
  441. return 0;
  442. }
/*
 * mmap one ring buffer per cpu; the first fd opened on a cpu owns the
 * buffer and every other fd on that cpu is redirected into it via
 * PERF_EVENT_IOC_SET_OUTPUT.  Sample ids are registered along the way.
 * On failure everything mapped so far is undone.
 */
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					/* first fd on this cpu owns the buffer */
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					/* all later fds share the owner's buffer */
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
/*
 * Per-thread variant of perf_evlist__mmap_per_cpu(): one ring buffer
 * per thread (cpu index is always 0), other fds of the same thread are
 * redirected into it with PERF_EVENT_IOC_SET_OUTPUT.  On failure all
 * buffers mapped so far are undone.
 */
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				/* first fd of this thread owns the buffer */
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				/* all later fds share the owner's buffer */
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	/* read-only mapping suffices when the kernel may overwrite */
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	/* one extra control page before the data pages */
	evlist->mmap_len = (pages + 1) * page_size;

	/* make room for the sample id bookkeeping before mapping */
	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
  550. int perf_evlist__create_maps(struct perf_evlist *evlist,
  551. struct perf_target *target)
  552. {
  553. evlist->threads = thread_map__new_str(target->pid, target->tid,
  554. target->uid);
  555. if (evlist->threads == NULL)
  556. return -1;
  557. if (perf_target__has_task(target))
  558. evlist->cpus = cpu_map__dummy_new();
  559. else if (!perf_target__has_cpu(target) && !target->uses_mmap)
  560. evlist->cpus = cpu_map__dummy_new();
  561. else
  562. evlist->cpus = cpu_map__new(target->cpu_list);
  563. if (evlist->cpus == NULL)
  564. goto out_delete_threads;
  565. return 0;
  566. out_delete_threads:
  567. thread_map__delete(evlist->threads);
  568. return -1;
  569. }
  570. void perf_evlist__delete_maps(struct perf_evlist *evlist)
  571. {
  572. cpu_map__delete(evlist->cpus);
  573. thread_map__delete(evlist->threads);
  574. evlist->cpus = NULL;
  575. evlist->threads = NULL;
  576. }
  577. int perf_evlist__apply_filters(struct perf_evlist *evlist)
  578. {
  579. struct perf_evsel *evsel;
  580. int err = 0;
  581. const int ncpus = cpu_map__nr(evlist->cpus),
  582. nthreads = thread_map__nr(evlist->threads);
  583. list_for_each_entry(evsel, &evlist->entries, node) {
  584. if (evsel->filter == NULL)
  585. continue;
  586. err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
  587. if (err)
  588. break;
  589. }
  590. return err;
  591. }
  592. int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
  593. {
  594. struct perf_evsel *evsel;
  595. int err = 0;
  596. const int ncpus = cpu_map__nr(evlist->cpus),
  597. nthreads = thread_map__nr(evlist->threads);
  598. list_for_each_entry(evsel, &evlist->entries, node) {
  599. err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
  600. if (err)
  601. break;
  602. }
  603. return err;
  604. }
  605. bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
  606. {
  607. struct perf_evsel *pos;
  608. if (evlist->nr_entries == 1)
  609. return true;
  610. if (evlist->id_pos < 0 || evlist->is_pos < 0)
  611. return false;
  612. list_for_each_entry(pos, &evlist->entries, node) {
  613. if (pos->id_pos != evlist->id_pos ||
  614. pos->is_pos != evlist->is_pos)
  615. return false;
  616. }
  617. return true;
  618. }
  619. u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
  620. {
  621. struct perf_evsel *evsel;
  622. if (evlist->combined_sample_type)
  623. return evlist->combined_sample_type;
  624. list_for_each_entry(evsel, &evlist->entries, node)
  625. evlist->combined_sample_type |= evsel->attr.sample_type;
  626. return evlist->combined_sample_type;
  627. }
  628. u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
  629. {
  630. evlist->combined_sample_type = 0;
  631. return __perf_evlist__combined_sample_type(evlist);
  632. }
/*
 * Check that every event shares the first event's read_format, and that
 * when samples embed counter reads (PERF_SAMPLE_READ) the format carries
 * the id needed to attribute them.
 */
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
  649. u64 perf_evlist__read_format(struct perf_evlist *evlist)
  650. {
  651. struct perf_evsel *first = perf_evlist__first(evlist);
  652. return first->attr.read_format;
  653. }
  654. u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
  655. {
  656. struct perf_evsel *first = perf_evlist__first(evlist);
  657. struct perf_sample *data;
  658. u64 sample_type;
  659. u16 size = 0;
  660. if (!first->attr.sample_id_all)
  661. goto out;
  662. sample_type = first->attr.sample_type;
  663. if (sample_type & PERF_SAMPLE_TID)
  664. size += sizeof(data->tid) * 2;
  665. if (sample_type & PERF_SAMPLE_TIME)
  666. size += sizeof(data->time);
  667. if (sample_type & PERF_SAMPLE_ID)
  668. size += sizeof(data->id);
  669. if (sample_type & PERF_SAMPLE_STREAM_ID)
  670. size += sizeof(data->stream_id);
  671. if (sample_type & PERF_SAMPLE_CPU)
  672. size += sizeof(data->cpu) * 2;
  673. if (sample_type & PERF_SAMPLE_IDENTIFIER)
  674. size += sizeof(data->id);
  675. out:
  676. return size;
  677. }
  678. bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
  679. {
  680. struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
  681. list_for_each_entry_continue(pos, &evlist->entries, node) {
  682. if (first->attr.sample_id_all != pos->attr.sample_id_all)
  683. return false;
  684. }
  685. return true;
  686. }
  687. bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
  688. {
  689. struct perf_evsel *first = perf_evlist__first(evlist);
  690. return first->attr.sample_id_all;
  691. }
  692. void perf_evlist__set_selected(struct perf_evlist *evlist,
  693. struct perf_evsel *evsel)
  694. {
  695. evlist->selected = evsel;
  696. }
  697. void perf_evlist__close(struct perf_evlist *evlist)
  698. {
  699. struct perf_evsel *evsel;
  700. int ncpus = cpu_map__nr(evlist->cpus);
  701. int nthreads = thread_map__nr(evlist->threads);
  702. list_for_each_entry_reverse(evsel, &evlist->entries, node)
  703. perf_evsel__close(evsel, ncpus, nthreads);
  704. }
  705. int perf_evlist__open(struct perf_evlist *evlist)
  706. {
  707. struct perf_evsel *evsel;
  708. int err;
  709. list_for_each_entry(evsel, &evlist->entries, node) {
  710. err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
  711. if (err < 0)
  712. goto out_err;
  713. }
  714. return 0;
  715. out_err:
  716. perf_evlist__close(evlist);
  717. errno = -err;
  718. return err;
  719. }
/*
 * Fork the workload given by @argv but keep it "corked": the child
 * blocks on a pipe read until perf_evlist__start_workload() writes to
 * the other end, so events can be set up before the exec happens.
 * A second pipe is used the other way around so the child can signal
 * it is ready.  Returns 0 on success, -1 on failure.
 */
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		/* child */
		if (pipe_output)
			dup2(2, 1);	/* route stdout onto stderr */

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		/* close our end of the cork automatically on exec */
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		/* only reached when exec failed */
		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	/* parent: with no other target, trace the freshly forked child */
	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	/* keep only the cork fd; it must not leak into other exec'd helpers */
	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
  785. int perf_evlist__start_workload(struct perf_evlist *evlist)
  786. {
  787. if (evlist->workload.cork_fd > 0) {
  788. char bf = 0;
  789. int ret;
  790. /*
  791. * Remove the cork, let it rip!
  792. */
  793. ret = write(evlist->workload.cork_fd, &bf, 1);
  794. if (ret < 0)
  795. perror("enable to write to pipe");
  796. close(evlist->workload.cork_fd);
  797. return ret;
  798. }
  799. return 0;
  800. }
  801. int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
  802. struct perf_sample *sample)
  803. {
  804. struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
  805. if (!evsel)
  806. return -EFAULT;
  807. return perf_evsel__parse_sample(evsel, event, sample);
  808. }
  809. size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
  810. {
  811. struct perf_evsel *evsel;
  812. size_t printed = 0;
  813. list_for_each_entry(evsel, &evlist->entries, node) {
  814. printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
  815. perf_evsel__name(evsel));
  816. }
  817. return printed + fprintf(fp, "\n");;
  818. }