/*
 * perfcounters.h — user-space ABI bits, syscall wrapper and event-name
 * tables shared by the performance-counter tools.
 */
  1. /*
  2. * Ioctls that can be done on a perf counter fd:
  3. */
  4. #define PERF_COUNTER_IOC_ENABLE _IO('$', 0)
  5. #define PERF_COUNTER_IOC_DISABLE _IO('$', 1)
  6. /*
  7. * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
  8. * counters in the current task.
  9. */
  10. #define PR_TASK_PERF_COUNTERS_DISABLE 31
  11. #define PR_TASK_PERF_COUNTERS_ENABLE 32
  12. #define MAX_COUNTERS 64
  13. #define MAX_NR_CPUS 256
  14. #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
  15. #define rdclock() \
  16. ({ \
  17. struct timespec ts; \
  18. \
  19. clock_gettime(CLOCK_MONOTONIC, &ts); \
  20. ts.tv_sec * 1000000000ULL + ts.tv_nsec; \
  21. })
  22. /*
  23. * Pick up some kernel type conventions:
  24. */
  25. #define __user
  26. #define asmlinkage
  27. typedef unsigned int __u32;
  28. typedef unsigned long long __u64;
  29. typedef long long __s64;
  30. /*
  31. * User-space ABI bits:
  32. */
  33. /*
  34. * Generalized performance counter event types, used by the hw_event.type
  35. * parameter of the sys_perf_counter_open() syscall:
  36. */
  37. enum hw_event_types {
  38. /*
  39. * Common hardware events, generalized by the kernel:
  40. */
  41. PERF_COUNT_CPU_CYCLES = 0,
  42. PERF_COUNT_INSTRUCTIONS = 1,
  43. PERF_COUNT_CACHE_REFERENCES = 2,
  44. PERF_COUNT_CACHE_MISSES = 3,
  45. PERF_COUNT_BRANCH_INSTRUCTIONS = 4,
  46. PERF_COUNT_BRANCH_MISSES = 5,
  47. PERF_COUNT_BUS_CYCLES = 6,
  48. PERF_HW_EVENTS_MAX = 7,
  49. /*
  50. * Special "software" counters provided by the kernel, even if
  51. * the hardware does not support performance counters. These
  52. * counters measure various physical and sw events of the
  53. * kernel (and allow the profiling of them as well):
  54. */
  55. PERF_COUNT_CPU_CLOCK = -1,
  56. PERF_COUNT_TASK_CLOCK = -2,
  57. PERF_COUNT_PAGE_FAULTS = -3,
  58. PERF_COUNT_CONTEXT_SWITCHES = -4,
  59. PERF_COUNT_CPU_MIGRATIONS = -5,
  60. PERF_SW_EVENTS_MIN = -6,
  61. };
  62. /*
  63. * IRQ-notification data record type:
  64. */
  65. enum perf_counter_record_type {
  66. PERF_RECORD_SIMPLE = 0,
  67. PERF_RECORD_IRQ = 1,
  68. PERF_RECORD_GROUP = 2,
  69. };
  70. /*
  71. * Hardware event to monitor via a performance monitoring counter:
  72. */
  73. struct perf_counter_hw_event {
  74. __s64 type;
  75. __u64 irq_period;
  76. __u64 record_type;
  77. __u64 read_format;
  78. __u64 disabled : 1, /* off by default */
  79. nmi : 1, /* NMI sampling */
  80. raw : 1, /* raw event type */
  81. inherit : 1, /* children inherit it */
  82. pinned : 1, /* must always be on PMU */
  83. exclusive : 1, /* only group on PMU */
  84. exclude_user : 1, /* don't count user */
  85. exclude_kernel : 1, /* ditto kernel */
  86. exclude_hv : 1, /* ditto hypervisor */
  87. exclude_idle : 1, /* don't count when idle */
  88. __reserved_1 : 54;
  89. __u32 extra_config_len;
  90. __u32 __reserved_4;
  91. __u64 __reserved_2;
  92. __u64 __reserved_3;
  93. };
  94. #ifdef __x86_64__
  95. # define __NR_perf_counter_open 295
  96. #endif
  97. #ifdef __i386__
  98. # define __NR_perf_counter_open 333
  99. #endif
  100. #ifdef __powerpc__
  101. #define __NR_perf_counter_open 319
  102. #endif
  103. asmlinkage int sys_perf_counter_open(
  104. struct perf_counter_hw_event *hw_event_uptr __user,
  105. pid_t pid,
  106. int cpu,
  107. int group_fd,
  108. unsigned long flags)
  109. {
  110. int ret;
  111. ret = syscall(
  112. __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags);
  113. #if defined(__x86_64__) || defined(__i386__)
  114. if (ret < 0 && ret > -4096) {
  115. errno = -ret;
  116. ret = -1;
  117. }
  118. #endif
  119. return ret;
  120. }
  121. static char *hw_event_names [] = {
  122. "CPU cycles",
  123. "instructions",
  124. "cache references",
  125. "cache misses",
  126. "branches",
  127. "branch misses",
  128. "bus cycles",
  129. };
  130. static char *sw_event_names [] = {
  131. "cpu clock ticks",
  132. "task clock ticks",
  133. "pagefaults",
  134. "context switches",
  135. "CPU migrations",
  136. };
  137. struct event_symbol {
  138. int event;
  139. char *symbol;
  140. };
  141. static struct event_symbol event_symbols [] = {
  142. {PERF_COUNT_CPU_CYCLES, "cpu-cycles", },
  143. {PERF_COUNT_CPU_CYCLES, "cycles", },
  144. {PERF_COUNT_INSTRUCTIONS, "instructions", },
  145. {PERF_COUNT_CACHE_REFERENCES, "cache-references", },
  146. {PERF_COUNT_CACHE_MISSES, "cache-misses", },
  147. {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", },
  148. {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", },
  149. {PERF_COUNT_BRANCH_MISSES, "branch-misses", },
  150. {PERF_COUNT_BUS_CYCLES, "bus-cycles", },
  151. {PERF_COUNT_CPU_CLOCK, "cpu-ticks", },
  152. {PERF_COUNT_CPU_CLOCK, "ticks", },
  153. {PERF_COUNT_TASK_CLOCK, "task-ticks", },
  154. {PERF_COUNT_PAGE_FAULTS, "page-faults", },
  155. {PERF_COUNT_PAGE_FAULTS, "faults", },
  156. {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", },
  157. {PERF_COUNT_CONTEXT_SWITCHES, "cs", },
  158. {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", },
  159. {PERF_COUNT_CPU_MIGRATIONS, "migrations", },
  160. };
  161. static int type_valid(int type)
  162. {
  163. if (type >= PERF_HW_EVENTS_MAX)
  164. return 0;
  165. if (type <= PERF_SW_EVENTS_MIN)
  166. return 0;
  167. return 1;
  168. }
  169. static char *event_name(int ctr)
  170. {
  171. int type = event_id[ctr];
  172. static char buf[32];
  173. if (event_raw[ctr]) {
  174. sprintf(buf, "raw 0x%x", type);
  175. return buf;
  176. }
  177. if (!type_valid(type))
  178. return "unknown";
  179. if (type >= 0)
  180. return hw_event_names[type];
  181. return sw_event_names[-type-1];
  182. }
  183. /*
  184. * Each event can have multiple symbolic names.
  185. * Symbolic names are (almost) exactly matched.
  186. */
  187. static int match_event_symbols(char *str)
  188. {
  189. unsigned int i;
  190. if (isdigit(str[0]) || str[0] == '-')
  191. return atoi(str);
  192. for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
  193. if (!strncmp(str, event_symbols[i].symbol,
  194. strlen(event_symbols[i].symbol)))
  195. return event_symbols[i].event;
  196. }
  197. return PERF_HW_EVENTS_MAX;
  198. }
  199. static void parse_events(char *str)
  200. {
  201. int type, raw;
  202. again:
  203. nr_counters++;
  204. if (nr_counters == MAX_COUNTERS)
  205. display_help();
  206. raw = 0;
  207. if (*str == 'r') {
  208. raw = 1;
  209. ++str;
  210. type = strtol(str, NULL, 16);
  211. } else {
  212. type = match_event_symbols(str);
  213. if (!type_valid(type))
  214. display_help();
  215. }
  216. event_id[nr_counters] = type;
  217. event_raw[nr_counters] = raw;
  218. str = strstr(str, ",");
  219. if (str) {
  220. str++;
  221. goto again;
  222. }
  223. }