@@ -1,18 +1,19 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
 #include "evsel.h"
 #include "evlist.h"
-#include "../perf.h"
 #include "util.h"
 #include "cpumap.h"
 #include "thread_map.h"
 
-#include <unistd.h>
-#include <sys/mman.h>
-
-#include <linux/bitops.h>
-#include <linux/hash.h>
-
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define SID(e, x, y) xyarray__entry(e->id, x, y)
 
 void perf_evsel__init(struct perf_evsel *evsel,
 		      struct perf_event_attr *attr, int idx)
@@ -74,24 +75,6 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 	}
 }
 
-void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
-{
-	int cpu;
-
-	for (cpu = 0; cpu < ncpus; cpu++) {
-		if (evlist->mmap[cpu].base != NULL) {
-			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-			evlist->mmap[cpu].base = NULL;
-		}
-	}
-}
-
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
-{
-	evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
-	return evlist->mmap != NULL ? 0 : -ENOMEM;
-}
-
 void perf_evsel__exit(struct perf_evsel *evsel)
 {
 	assert(list_empty(&evsel->node));
@@ -258,115 +241,6 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
 	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
-			       int mask, int fd)
-{
-	evlist->mmap[cpu].prev = 0;
-	evlist->mmap[cpu].mask = mask;
-	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
-				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[cpu].base == MAP_FAILED)
-		return -1;
-
-	perf_evlist__add_pollfd(evlist, fd);
-	return 0;
-}
-
-static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
-				int cpu, int thread, int fd)
-{
-	struct perf_sample_id *sid;
-	u64 read_data[4] = { 0, };
-	int hash, id_idx = 1; /* The first entry is the counter value */
-
-	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
-	    read(fd, &read_data, sizeof(read_data)) == -1)
-		return -1;
-
-	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		++id_idx;
-	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		++id_idx;
-
-	sid = SID(evsel, cpu, thread);
-	sid->id = read_data[id_idx];
-	sid->evsel = evsel;
-	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
-	hlist_add_head(&sid->node, &evlist->heads[hash]);
-	return 0;
-}
-
-/** perf_evlist__mmap - Create per cpu maps to receive events
- *
- * @evlist - list of events
- * @cpus - cpu map being monitored
- * @threads - threads map being monitored
- * @pages - map length in pages
- * @overwrite - overwrite older events?
- *
- * If overwrite is false the user needs to signal event consuption using:
- *
- *	struct perf_mmap *m = &evlist->mmap[cpu];
- *	unsigned int head = perf_mmap__read_head(m);
- *
- *	perf_mmap__write_tail(m, head)
- */
-int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
-		      struct thread_map *threads, int pages, bool overwrite)
-{
-	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-	int mask = pages * page_size - 1, cpu;
-	struct perf_evsel *first_evsel, *evsel;
-	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
-
-	if (evlist->mmap == NULL &&
-	    perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
-		return -ENOMEM;
-
-	if (evlist->pollfd == NULL &&
-	    perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
-		return -ENOMEM;
-
-	evlist->overwrite = overwrite;
-	evlist->mmap_len = (pages + 1) * page_size;
-	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
-
-	list_for_each_entry(evsel, &evlist->entries, node) {
-		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-		    evsel->id == NULL &&
-		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
-			return -ENOMEM;
-
-		for (cpu = 0; cpu < cpus->nr; cpu++) {
-			for (thread = 0; thread < threads->nr; thread++) {
-				int fd = FD(evsel, cpu, thread);
-
-				if (evsel->idx || thread) {
-					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
-						  FD(first_evsel, cpu, 0)) != 0)
-						goto out_unmap;
-				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
-					goto out_unmap;
-
-				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
-					goto out_unmap;
-			}
-		}
-	}
-
-	return 0;
-
-out_unmap:
-	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		if (evlist->mmap[cpu].base != NULL) {
-			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-			evlist->mmap[cpu].base = NULL;
-		}
-	}
-	return -1;
-}
-
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
 				       struct perf_sample *sample)
 {