/* evsel.c */

#include "evsel.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread.h"
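
/* FD(e, x, y): lvalue of the fd stored for cpu index x, thread index y. */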
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
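
/*
 * Allocate a zeroed event selector, record its position in the event
 * list and fill in the perf_event_attr type/config pair.
 */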
struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx	   = idx;
		evsel->attr.type   = type;
		evsel->attr.config = config;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}
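
/* Allocate the ncpus x nthreads table of per-event file descriptors. */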
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}
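
/*
 * Allocate the counts area: the aggregate plus one
 * struct perf_counts_values slot per cpu.
 */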
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
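
/* Release the fd table without closing the descriptors. */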
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
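
/* Close every open descriptor and mark its slot unused (-1). */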
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
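
/*
 * Free the evsel itself; it must already have been removed from any
 * event list, hence the assert on its list node.
 */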
void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	free(evsel);
}
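
/*
 * Read one counter value into evsel->counts->cpu[cpu]. With scale set,
 * three u64s are read (value, time enabled, time running; the attr must
 * have been opened with PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING) and the
 * value is scaled up by ena/run to compensate for multiplexing.
 */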
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
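
/*
 * Sum the counter over all cpus and threads into evsel->counts->aggr,
 * resetting val/ena/run first so repeated reads don't accumulate stale
 * totals. counts->scaled ends up -1 if the counter never ran, 1 if the
 * aggregate was scaled, 0 otherwise.
 */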
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
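
/*
 * Open the event once per cpu in the map, system wide (pid == -1),
 * unwinding the already-opened descriptors on failure.
 */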
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
{
	int cpu;

	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
							cpus->map[cpu], -1, 0);
		if (FD(evsel, cpu, 0) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--cpu >= 0) {
		close(FD(evsel, cpu, 0));
		FD(evsel, cpu, 0) = -1;
	}
	return -1;
}
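
/*
 * Open the event once per thread in the map, on any cpu (cpu == -1),
 * unwinding on failure.
 */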
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
{
	int thread;

	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr) < 0)
		return -1;

	for (thread = 0; thread < threads->nr; thread++) {
		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
							   threads->map[thread], -1, -1, 0);
		if (FD(evsel, 0, thread) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--thread >= 0) {
		close(FD(evsel, 0, thread));
		FD(evsel, 0, thread) = -1;
	}
	return -1;
}
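
/* Dispatch: a NULL thread map means per-cpu, otherwise per-thread. */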
int perf_evsel__open(struct perf_evsel *evsel,
		     struct cpu_map *cpus, struct thread_map *threads)
{
	if (threads == NULL)
		return perf_evsel__open_per_cpu(evsel, cpus);

	return perf_evsel__open_per_thread(evsel, threads);
}
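
/*
 * A minimal caller sketch (illustrative only, not part of the original
 * file): count cycles on every cpu in a map built elsewhere. The
 * hypothetical count_cycles_per_cpu() assumes the PERF_TYPE_HARDWARE /
 * PERF_COUNT_HW_CPU_CYCLES constants from <linux/perf_event.h> and sets
 * read_format so the scaled read actually sees ena/run.
 */
#if 0
static int count_cycles_per_cpu(struct cpu_map *cpus)
{
	struct perf_evsel *evsel;
	int cpu, err = -1;

	evsel = perf_evsel__new(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, 0);
	if (evsel == NULL)
		return -1;

	/* Needed for scale == true reads: value, time enabled, time running. */
	evsel->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				  PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (perf_evsel__open_per_cpu(evsel, cpus) < 0)
		goto out_delete;

	/* ... run the workload to be measured ... */

	for (cpu = 0; cpu < cpus->nr; cpu++)
		if (__perf_evsel__read_on_cpu(evsel, cpu, 0, true) == 0)
			printf("cpu %d: %llu cycles\n", cpus->map[cpu],
			       (unsigned long long)evsel->counts->cpu[cpu].val);

	err = 0;
	perf_evsel__close_fd(evsel, cpus->nr, 1);
out_delete:
	perf_evsel__delete(evsel);
	return err;
}
#endif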