evsel.c

#include "evsel.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread.h"
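
/*
 * Shorthand for the per-cpu, per-thread file descriptor slot in the
 * evsel's fd xyarray: FD(evsel, cpu, thread) is an lvalue.
 */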
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
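
/*
 * Allocate a zeroed event selector for the given attribute type/config
 * pair. Returns NULL if the allocation fails.
 */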
struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx	   = idx;
		evsel->attr.type   = type;
		evsel->attr.config = config;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}
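
/* Allocate the ncpus x nthreads table of counter file descriptors. */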
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}
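
/*
 * Allocate the counts buffer: one aggregate slot plus one
 * perf_counts_values per cpu, filled in by the read routines below.
 */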
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
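
/* Free the fd table; closing the fds is perf_evsel__close_fd's job. */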
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
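
/* Close every counter fd and mark each slot unused (-1). */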
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
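
/*
 * Release the evsel itself. It must already be unlinked from whatever
 * evsel list it was on, hence the assert.
 */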
void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	free(evsel);
}
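
/*
 * Read the (cpu, thread) counter into counts->cpu[cpu]. With scale, the
 * fd is expected to also return the time the event was enabled and
 * running (PERF_FORMAT_TOTAL_TIME_ENABLED/_RUNNING), which is used to
 * extrapolate the count when the event was multiplexed off the pmu.
 */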
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	/* Lazily allocate the counts the result is stored in. */
	if (evsel->counts == NULL &&
	    perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val *
					  count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
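
/*
 * Sum one counter over all cpus and threads into counts->aggr.
 * counts->scaled ends up 0 (no scaling needed), 1 (the value was
 * extrapolated because the event was multiplexed) or -1 (the event
 * never actually ran).
 */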
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	/* Reset all three fields: stale ena/run would skew the scaling. */
	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val *
					  aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
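
/*
 * Open the counter once per cpu in the map, for all threads (pid == -1),
 * closing the already opened fds if any cpu fails.
 */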
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
{
	int cpu;

	/* Allocate the fd table lazily so callers need not do it themselves. */
	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
							cpus->map[cpu], -1, 0);
		if (FD(evsel, cpu, 0) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--cpu >= 0) {
		close(FD(evsel, cpu, 0));
		FD(evsel, cpu, 0) = -1;
	}
	return -1;
}
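
/*
 * Open the counter once per thread in the map, on any cpu (cpu == -1),
 * rolling back on the first failure.
 */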
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
{
	int thread;

	/* Same lazy fd table allocation as in the per-cpu variant. */
	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, 1, threads->nr) < 0)
		return -1;

	for (thread = 0; thread < threads->nr; thread++) {
		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
							   threads->map[thread], -1, -1, 0);
		if (FD(evsel, 0, thread) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--thread >= 0) {
		close(FD(evsel, 0, thread));
		FD(evsel, 0, thread) = -1;
	}
	return -1;
}
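
/* Convenience wrapper: per-cpu if no thread map is given, else per-thread. */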
int perf_evsel__open(struct perf_evsel *evsel,
		     struct cpu_map *cpus, struct thread_map *threads)
{
	if (threads == NULL)
		return perf_evsel__open_per_cpu(evsel, cpus);

	return perf_evsel__open_per_thread(evsel, threads);
}
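
/*
 * Example usage (a sketch, not part of this file): count cycles on every
 * online cpu for one second. cpu_map__new(NULL) builds a map of all online
 * cpus, as elsewhere in tools/perf; error handling is elided. Scaled reads
 * expect the enabled/running times, so the read_format bits below must be
 * set before the counter is opened.
 *
 *	struct cpu_map *cpus = cpu_map__new(NULL);
 *	struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
 *						   PERF_COUNT_HW_CPU_CYCLES, 0);
 *
 *	evsel->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *				  PERF_FORMAT_TOTAL_TIME_RUNNING;
 *
 *	if (perf_evsel__alloc_counts(evsel, cpus->nr) == 0 &&
 *	    perf_evsel__open_per_cpu(evsel, cpus) == 0) {
 *		sleep(1);
 *		if (__perf_evsel__read(evsel, cpus->nr, 1, true) == 0)
 *			printf("cycles: %" PRIu64 "\n", evsel->counts->aggr.val);
 *		perf_evsel__close_fd(evsel, cpus->nr, 1);
 *	}
 */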