evlist.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

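/*
 * Add the default event: a PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
 * counter.
 */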
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

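/*
 * Stop all counters: issue PERF_EVENT_IOC_DISABLE on every per-cpu,
 * per-thread fd of every event in the list.
 */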
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

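/*
 * One pollfd is needed per event fd, i.e. cpus * threads * events in the
 * worst case.
 */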
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

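/*
 * Sample IDs are hashed into evlist->heads so that, given the id field of
 * a sample, perf_evlist__id2evsel() can find the evsel it belongs to.
 */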
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

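/*
 * Recover the event ID by reading the counter once: with PERF_FORMAT_ID set,
 * the ID follows the counter value and the optional TOTAL_TIME_ENABLED and
 * TOTAL_TIME_RUNNING fields, so its index in the read buffer depends on which
 * of those bits are set in read_format.
 */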
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

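/*
 * Map a sample ID back to its evsel. With a single event there is nothing
 * to disambiguate; otherwise look the ID up in the hash built by
 * perf_evlist__id_hash().
 */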
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

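/*
 * Read one event from the mmap'ed ring buffer. In overwrite mode the kernel
 * may be writing over the region we are reading, so fall back to the head
 * when we lag too far behind; an event that wraps around the end of the
 * buffer is reassembled into evlist->event_copy.
 */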
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

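/*
 * Normally there is one mmap per cpu; when the cpu map is the dummy map
 * (a single -1 entry, i.e. no cpu affinity), mmap per thread instead.
 */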
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

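/*
 * Map the ring buffer of one event fd and register the fd for polling.
 */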
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

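/*
 * The first fd on each cpu gets the mmap; every other fd on that cpu is
 * redirected into it with PERF_EVENT_IOC_SET_OUTPUT, so all events for a
 * cpu land in a single ring buffer.
 */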
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per-cpu (or per-thread) maps to receive events
 *
 * @evlist: list of events
 * @pages: map length in data pages; the mask arithmetic below assumes
 *	pages * page_size is a power of two, and one extra control page is
 *	mapped in front of the data
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

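/*
 * Build the thread and cpu maps for the given target. When monitoring an
 * existing thread/process without an explicit cpu list, a dummy cpu map is
 * used so the counters follow the thread instead of being bound per cpu.
 */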
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

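/*
 * Apply each evsel's filter string, if any, to all of its fds via
 * PERF_EVENT_IOC_SET_FILTER.
 */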
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

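/*
 * All events in the list must agree on sample_type (and, below, on
 * sample_id_all) for the stream of samples to be parsed with a single
 * layout; compare every evsel against the first one.
 */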
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}