@@ -227,12 +227,14 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		list_for_each_entry(pos, &evlist->entries, node) {
 			if (!perf_evsel__is_group_leader(pos))
 				continue;
-			for (thread = 0; thread < evlist->threads->nr; thread++)
+			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_DISABLE, 0);
 		}
@@ -243,12 +245,14 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		list_for_each_entry(pos, &evlist->entries, node) {
 			if (!perf_evsel__is_group_leader(pos))
 				continue;
-			for (thread = 0; thread < evlist->threads->nr; thread++)
+			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_ENABLE, 0);
 		}
@@ -257,7 +261,9 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 
 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
+	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
 	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
 	return evlist->pollfd != NULL ? 0 : -ENOMEM;
 }
@@ -417,7 +423,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__all(evlist->cpus))
-		evlist->nr_mmaps = evlist->threads->nr;
+		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
@@ -442,11 +448,13 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
 {
 	struct perf_evsel *evsel;
 	int cpu, thread;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
 
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			list_for_each_entry(evsel, &evlist->entries, node) {
 				int fd = FD(evsel, cpu, thread);
 
@@ -470,7 +478,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
 	return 0;
 
 out_unmap:
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		if (evlist->mmap[cpu].base != NULL) {
 			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
 			evlist->mmap[cpu].base = NULL;
@@ -483,8 +491,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
 {
 	struct perf_evsel *evsel;
 	int thread;
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
 
 		list_for_each_entry(evsel, &evlist->entries, node) {
@@ -509,7 +518,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
 	return 0;
 
 out_unmap:
-	for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (thread = 0; thread < nr_threads; thread++) {
 		if (evlist->mmap[thread].base != NULL) {
 			munmap(evlist->mmap[thread].base, evlist->mmap_len);
 			evlist->mmap[thread].base = NULL;
@@ -610,7 +619,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
 	struct perf_evsel *evsel;
 	int err = 0;
 	const int ncpus = cpu_map__nr(evlist->cpus),
-		  nthreads = evlist->threads->nr;
+		  nthreads = thread_map__nr(evlist->threads);
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
 		if (evsel->filter == NULL)
@@ -629,7 +638,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
 	struct perf_evsel *evsel;
 	int err = 0;
 	const int ncpus = cpu_map__nr(evlist->cpus),
-		  nthreads = evlist->threads->nr;
+		  nthreads = thread_map__nr(evlist->threads);
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
 		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
@@ -726,7 +735,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
 	return 0;
 out_err:
 	ncpus = cpu_map__nr(evlist->cpus);
-	nthreads = evlist->threads ? evlist->threads->nr : 1;
+	nthreads = thread_map__nr(evlist->threads);
 
 	list_for_each_entry_reverse(evsel, &evlist->entries, node)
 		perf_evsel__close(evsel, ncpus, nthreads);
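
For reference, a minimal sketch of the inline helpers the patch switches to, assuming the usual NULL-tolerant shape from cpumap.h and thread_map.h (the exact definitions may differ in the tree this applies against). Note how thread_map__nr() is assumed to absorb the open-coded "evlist->threads ? evlist->threads->nr : 1" fallback that the last hunk removes from perf_evlist__open():

	/* assumed shape, per cpumap.h: a NULL map counts as one dummy CPU */
	static inline int cpu_map__nr(const struct cpu_map *map)
	{
		return map ? map->nr : 1;
	}

	/* assumed shape, per thread_map.h: a NULL map counts as one dummy
	 * thread, matching the fallback removed in the last hunk */
	static inline int thread_map__nr(struct thread_map *threads)
	{
		return threads ? threads->nr : 1;
	}

Besides the readability win, hoisting the calls into the nr_cpus/nr_threads locals avoids re-evaluating the map sizes on every iteration of the per-cpu and per-thread loops.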