evsel.c

#include "evsel.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread.h"
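
/* Per-event file descriptors live in a 2-D array indexed by (cpu, thread). */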
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
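
/* Allocate and initialize an event selector for the given attributes. */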
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx = idx;
		evsel->attr = *attr;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}
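
/* Allocate the ncpus x nthreads matrix of per-event file descriptors. */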
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}
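
/* Allocate storage for the aggregate and the per-cpu counter values. */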
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
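
/* Free the fd matrix; does not close the descriptors, see perf_evsel__close_fd. */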
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
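
/* Close every descriptor in the matrix and mark each slot as unused. */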
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
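
/* Destroy an evsel that has already been unlinked from its event list. */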
void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	free(evsel);
}
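
/*
 * Read one (cpu, thread) counter. When scale is set, three u64 values
 * (value, time enabled, time running) are expected, which assumes the
 * event was opened with PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING in attr.read_format; the value is then
 * scaled up to compensate for time the counter was multiplexed out.
 */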
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
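
/*
 * Read and aggregate the counter over all (cpu, thread) pairs.
 * counts->scaled ends up 0 when no scaling was needed, 1 when the value
 * was scaled to compensate for multiplexing, and -1 when the counter
 * never ran.
 */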
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	/* Zero ena/run too, so repeated reads don't accumulate stale times. */
	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
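
/*
 * Open the event once per cpu in the map, system wide (pid == -1).
 * On failure, roll back the descriptors opened so far.
 */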
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
{
	int cpu;

	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
							cpus->map[cpu], -1, 0);
		if (FD(evsel, cpu, 0) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--cpu >= 0) {
		close(FD(evsel, cpu, 0));
		FD(evsel, cpu, 0) = -1;
	}
	return -1;
}
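
/*
 * Open the event once per thread in the map, on any cpu (cpu == -1).
 * On failure, roll back the descriptors opened so far.
 */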
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
{
	int thread;

	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr) < 0)
		return -1;

	for (thread = 0; thread < threads->nr; thread++) {
		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
							   threads->map[thread], -1, -1, 0);
		if (FD(evsel, 0, thread) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--thread >= 0) {
		close(FD(evsel, 0, thread));
		FD(evsel, 0, thread) = -1;
	}
	return -1;
}
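
/* Open per thread when a thread map is given, otherwise per cpu. */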
int perf_evsel__open(struct perf_evsel *evsel,
		     struct cpu_map *cpus, struct thread_map *threads)
{
	if (threads == NULL)
		return perf_evsel__open_per_cpu(evsel, cpus);

	return perf_evsel__open_per_thread(evsel, threads);
}