parse-events.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824
  1. #include "util.h"
  2. #include "../perf.h"
  3. #include "parse-options.h"
  4. #include "parse-events.h"
  5. #include "exec_cmd.h"
  6. #include "string.h"
  7. #include "cache.h"
/* Number of events parsed so far and their attributes, filled in by
 * parse_events(). */
int nr_counters;

struct perf_counter_attr attrs[MAX_COUNTERS];

/* One row of the symbolic-event lookup table: maps a canonical name and
 * an optional alias ("" when absent) to a perf type/config pair. */
struct event_symbol {
	u8 type;
	u64 config;
	const char *symbol;
	const char *alias;
};

/* Outcome of one parser stage: no match, a single event consumed, or a
 * whole group of events consumed (subsystem "*" expansion). */
enum event_result {
	EVT_FAILED,
	EVT_HANDLED,
	EVT_HANDLED_ALL
};

/* Debugfs tracing-events directory; presumably filled in by the perf
 * startup code before any parsing happens — set externally to this file. */
char debugfs_path[MAXPATHLEN];

/* Initializer shorthands for hardware/software rows of event_symbols[]. */
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
/*
 * Symbolic names accepted by -e for the generic hardware and software
 * events.  An empty alias string means the symbol has no alternate
 * spelling.
 */
static struct event_symbol event_symbols[] = {
	{ CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
	{ CHW(INSTRUCTIONS), "instructions", "" },
	{ CHW(CACHE_REFERENCES), "cache-references", "" },
	{ CHW(CACHE_MISSES), "cache-misses", "" },
	{ CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
	{ CHW(BRANCH_MISSES), "branch-misses", "" },
	{ CHW(BUS_CYCLES), "bus-cycles", "" },
	{ CSW(CPU_CLOCK), "cpu-clock", "" },
	{ CSW(TASK_CLOCK), "task-clock", "" },
	{ CSW(PAGE_FAULTS), "page-faults", "faults" },
	{ CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
	{ CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
	{ CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
	{ CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};
/* Extract one bit-field from a raw event config word using the
 * PERF_COUNTER_<name>_MASK / _SHIFT pairs. */
#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
/* Display names, indexed by the PERF_COUNT_HW_* enum values — order
 * must match the kernel's enum. */
static const char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};

/* Display names, indexed by the PERF_COUNT_SW_* enum values — order
 * must match the kernel's enum. */
static const char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};
#define MAX_ALIASES 8

/* Accepted spellings for each cache level; column 0 is the canonical
 * name used when printing.  Rows follow PERF_COUNT_HW_CACHE_*. */
static const char *hw_cache[][MAX_ALIASES] = {
	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
	{ "LLC", "L2" },
	{ "dTLB", "d-tlb", "Data-TLB", },
	{ "iTLB", "i-tlb", "Instruction-TLB", },
	{ "branch", "branches", "bpu", "btb", "bpc", },
};

/* Accepted spellings per cache operation; column 1 (the plural form)
 * is used by event_cache_name() for the short "type-ops" rendering. */
static const char *hw_cache_op[][MAX_ALIASES] = {
	{ "load", "loads", "read", },
	{ "store", "stores", "write", },
	{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

/* Accepted spellings per cache result (access vs miss). */
static const char *hw_cache_result[][MAX_ALIASES] = {
	{ "refs", "Reference", "ops", "access", },
	{ "misses", "miss", },
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
/* Bit for a single cache op, for testing against hw_cache_stat[]. */
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
	[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)] = (CACHE_READ),
};
/*
 * Iterate over the subsystem subdirectories of an open events DIR,
 * skipping "." and "..".
 * NOTE(review): uses readdir_r() with a stack-allocated dirent — assumes
 * d_name fits in struct dirent; readdir_r is deprecated in later glibc.
 */
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)	       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
	if (sys_dirent.d_type == DT_DIR &&				       \
	    (strcmp(sys_dirent.d_name, ".")) &&				       \
	    (strcmp(sys_dirent.d_name, "..")))
  105. static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
  106. {
  107. char evt_path[MAXPATHLEN];
  108. int fd;
  109. snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
  110. sys_dir->d_name, evt_dir->d_name);
  111. fd = open(evt_path, O_RDONLY);
  112. if (fd < 0)
  113. return -EINVAL;
  114. close(fd);
  115. return 0;
  116. }
/*
 * Iterate over the event subdirectories of a subsystem DIR, skipping
 * "." and ".." and any entry that lacks an "id" file (i.e. is not a
 * real tracepoint).  Same readdir_r() caveat as for_each_subsystem.
 */
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	       \
	if (evt_dirent.d_type == DT_DIR &&				       \
	    (strcmp(evt_dirent.d_name, ".")) &&				       \
	    (strcmp(evt_dirent.d_name, "..")) &&			       \
	    (!tp_event_has_id(&sys_dirent, &evt_dirent)))

/* Maximum length of a subsystem or event name we will copy around. */
#define MAX_EVENT_LENGTH 512
  124. int valid_debugfs_mount(const char *debugfs)
  125. {
  126. struct statfs st_fs;
  127. if (statfs(debugfs, &st_fs) < 0)
  128. return -ENOENT;
  129. else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
  130. return -ENOENT;
  131. return 0;
  132. }
  133. struct tracepoint_path *tracepoint_id_to_path(u64 config)
  134. {
  135. struct tracepoint_path *path = NULL;
  136. DIR *sys_dir, *evt_dir;
  137. struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
  138. char id_buf[4];
  139. int sys_dir_fd, fd;
  140. u64 id;
  141. char evt_path[MAXPATHLEN];
  142. if (valid_debugfs_mount(debugfs_path))
  143. return NULL;
  144. sys_dir = opendir(debugfs_path);
  145. if (!sys_dir)
  146. goto cleanup;
  147. sys_dir_fd = dirfd(sys_dir);
  148. for_each_subsystem(sys_dir, sys_dirent, sys_next) {
  149. int dfd = openat(sys_dir_fd, sys_dirent.d_name,
  150. O_RDONLY|O_DIRECTORY), evt_dir_fd;
  151. if (dfd == -1)
  152. continue;
  153. evt_dir = fdopendir(dfd);
  154. if (!evt_dir) {
  155. close(dfd);
  156. continue;
  157. }
  158. evt_dir_fd = dirfd(evt_dir);
  159. for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
  160. snprintf(evt_path, MAXPATHLEN, "%s/id",
  161. evt_dirent.d_name);
  162. fd = openat(evt_dir_fd, evt_path, O_RDONLY);
  163. if (fd < 0)
  164. continue;
  165. if (read(fd, id_buf, sizeof(id_buf)) < 0) {
  166. close(fd);
  167. continue;
  168. }
  169. close(fd);
  170. id = atoll(id_buf);
  171. if (id == config) {
  172. closedir(evt_dir);
  173. closedir(sys_dir);
  174. path = calloc(1, sizeof(path));
  175. path->system = malloc(MAX_EVENT_LENGTH);
  176. if (!path->system) {
  177. free(path);
  178. return NULL;
  179. }
  180. path->name = malloc(MAX_EVENT_LENGTH);
  181. if (!path->name) {
  182. free(path->system);
  183. free(path);
  184. return NULL;
  185. }
  186. strncpy(path->system, sys_dirent.d_name,
  187. MAX_EVENT_LENGTH);
  188. strncpy(path->name, evt_dirent.d_name,
  189. MAX_EVENT_LENGTH);
  190. return path;
  191. }
  192. }
  193. closedir(evt_dir);
  194. }
  195. cleanup:
  196. closedir(sys_dir);
  197. return NULL;
  198. }
  199. #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
  200. static const char *tracepoint_id_to_name(u64 config)
  201. {
  202. static char buf[TP_PATH_LEN];
  203. struct tracepoint_path *path;
  204. path = tracepoint_id_to_path(config);
  205. if (path) {
  206. snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
  207. free(path->name);
  208. free(path->system);
  209. free(path);
  210. } else
  211. snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
  212. return buf;
  213. }
  214. static int is_cache_op_valid(u8 cache_type, u8 cache_op)
  215. {
  216. if (hw_cache_stat[cache_type] & COP(cache_op))
  217. return 1; /* valid */
  218. else
  219. return 0; /* invalid */
  220. }
  221. static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
  222. {
  223. static char name[50];
  224. if (cache_result) {
  225. sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
  226. hw_cache_op[cache_op][0],
  227. hw_cache_result[cache_result][0]);
  228. } else {
  229. sprintf(name, "%s-%s", hw_cache[cache_type][0],
  230. hw_cache_op[cache_op][1]);
  231. }
  232. return name;
  233. }
  234. const char *event_name(int counter)
  235. {
  236. u64 config = attrs[counter].config;
  237. int type = attrs[counter].type;
  238. return __event_name(type, config);
  239. }
  240. const char *__event_name(int type, u64 config)
  241. {
  242. static char buf[32];
  243. if (type == PERF_TYPE_RAW) {
  244. sprintf(buf, "raw 0x%llx", config);
  245. return buf;
  246. }
  247. switch (type) {
  248. case PERF_TYPE_HARDWARE:
  249. if (config < PERF_COUNT_HW_MAX)
  250. return hw_event_names[config];
  251. return "unknown-hardware";
  252. case PERF_TYPE_HW_CACHE: {
  253. u8 cache_type, cache_op, cache_result;
  254. cache_type = (config >> 0) & 0xff;
  255. if (cache_type > PERF_COUNT_HW_CACHE_MAX)
  256. return "unknown-ext-hardware-cache-type";
  257. cache_op = (config >> 8) & 0xff;
  258. if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
  259. return "unknown-ext-hardware-cache-op";
  260. cache_result = (config >> 16) & 0xff;
  261. if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
  262. return "unknown-ext-hardware-cache-result";
  263. if (!is_cache_op_valid(cache_type, cache_op))
  264. return "invalid-cache";
  265. return event_cache_name(cache_type, cache_op, cache_result);
  266. }
  267. case PERF_TYPE_SOFTWARE:
  268. if (config < PERF_COUNT_SW_MAX)
  269. return sw_event_names[config];
  270. return "unknown-software";
  271. case PERF_TYPE_TRACEPOINT:
  272. return tracepoint_id_to_name(config);
  273. default:
  274. break;
  275. }
  276. return "unknown";
  277. }
  278. static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
  279. {
  280. int i, j;
  281. int n, longest = -1;
  282. for (i = 0; i < size; i++) {
  283. for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
  284. n = strlen(names[i][j]);
  285. if (n > longest && !strncasecmp(*str, names[i][j], n))
  286. longest = n;
  287. }
  288. if (longest > 0) {
  289. *str += longest;
  290. return i;
  291. }
  292. }
  293. return -1;
  294. }
  295. static enum event_result
  296. parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
  297. {
  298. const char *s = *str;
  299. int cache_type = -1, cache_op = -1, cache_result = -1;
  300. cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
  301. /*
  302. * No fallback - if we cannot get a clear cache type
  303. * then bail out:
  304. */
  305. if (cache_type == -1)
  306. return EVT_FAILED;
  307. while ((cache_op == -1 || cache_result == -1) && *s == '-') {
  308. ++s;
  309. if (cache_op == -1) {
  310. cache_op = parse_aliases(&s, hw_cache_op,
  311. PERF_COUNT_HW_CACHE_OP_MAX);
  312. if (cache_op >= 0) {
  313. if (!is_cache_op_valid(cache_type, cache_op))
  314. return 0;
  315. continue;
  316. }
  317. }
  318. if (cache_result == -1) {
  319. cache_result = parse_aliases(&s, hw_cache_result,
  320. PERF_COUNT_HW_CACHE_RESULT_MAX);
  321. if (cache_result >= 0)
  322. continue;
  323. }
  324. /*
  325. * Can't parse this as a cache op or result, so back up
  326. * to the '-'.
  327. */
  328. --s;
  329. break;
  330. }
  331. /*
  332. * Fall back to reads:
  333. */
  334. if (cache_op == -1)
  335. cache_op = PERF_COUNT_HW_CACHE_OP_READ;
  336. /*
  337. * Fall back to accesses:
  338. */
  339. if (cache_result == -1)
  340. cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
  341. attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
  342. attr->type = PERF_TYPE_HW_CACHE;
  343. *str = s;
  344. return EVT_HANDLED;
  345. }
  346. static enum event_result
  347. parse_single_tracepoint_event(char *sys_name,
  348. const char *evt_name,
  349. unsigned int evt_length,
  350. char *flags,
  351. struct perf_counter_attr *attr,
  352. const char **strp)
  353. {
  354. char evt_path[MAXPATHLEN];
  355. char id_buf[4];
  356. u64 id;
  357. int fd;
  358. if (flags) {
  359. if (!strncmp(flags, "record", strlen(flags)))
  360. attr->sample_type |= PERF_SAMPLE_RAW;
  361. }
  362. snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
  363. sys_name, evt_name);
  364. fd = open(evt_path, O_RDONLY);
  365. if (fd < 0)
  366. return EVT_FAILED;
  367. if (read(fd, id_buf, sizeof(id_buf)) < 0) {
  368. close(fd);
  369. return EVT_FAILED;
  370. }
  371. close(fd);
  372. id = atoll(id_buf);
  373. attr->config = id;
  374. attr->type = PERF_TYPE_TRACEPOINT;
  375. *strp = evt_name + evt_length;
  376. return EVT_HANDLED;
  377. }
  378. /* sys + ':' + event + ':' + flags*/
  379. #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
  380. static enum event_result
  381. parse_subsystem_tracepoint_event(char *sys_name, char *flags)
  382. {
  383. char evt_path[MAXPATHLEN];
  384. struct dirent *evt_ent;
  385. DIR *evt_dir;
  386. snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
  387. evt_dir = opendir(evt_path);
  388. if (!evt_dir) {
  389. perror("Can't open event dir");
  390. return EVT_FAILED;
  391. }
  392. while ((evt_ent = readdir(evt_dir))) {
  393. char event_opt[MAX_EVOPT_LEN + 1];
  394. int len;
  395. unsigned int rem = MAX_EVOPT_LEN;
  396. if (!strcmp(evt_ent->d_name, ".")
  397. || !strcmp(evt_ent->d_name, "..")
  398. || !strcmp(evt_ent->d_name, "enable")
  399. || !strcmp(evt_ent->d_name, "filter"))
  400. continue;
  401. len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
  402. evt_ent->d_name);
  403. if (len < 0)
  404. return EVT_FAILED;
  405. rem -= len;
  406. if (flags) {
  407. if (rem < strlen(flags) + 1)
  408. return EVT_FAILED;
  409. strcat(event_opt, ":");
  410. strcat(event_opt, flags);
  411. }
  412. if (parse_events(NULL, event_opt, 0))
  413. return EVT_FAILED;
  414. }
  415. return EVT_HANDLED_ALL;
  416. }
  417. static enum event_result parse_tracepoint_event(const char **strp,
  418. struct perf_counter_attr *attr)
  419. {
  420. const char *evt_name;
  421. char *flags;
  422. char sys_name[MAX_EVENT_LENGTH];
  423. unsigned int sys_length, evt_length;
  424. if (valid_debugfs_mount(debugfs_path))
  425. return 0;
  426. evt_name = strchr(*strp, ':');
  427. if (!evt_name)
  428. return EVT_FAILED;
  429. sys_length = evt_name - *strp;
  430. if (sys_length >= MAX_EVENT_LENGTH)
  431. return 0;
  432. strncpy(sys_name, *strp, sys_length);
  433. sys_name[sys_length] = '\0';
  434. evt_name = evt_name + 1;
  435. flags = strchr(evt_name, ':');
  436. if (flags) {
  437. /* split it out: */
  438. evt_name = strndup(evt_name, flags - evt_name);
  439. flags++;
  440. }
  441. evt_length = strlen(evt_name);
  442. if (evt_length >= MAX_EVENT_LENGTH)
  443. return EVT_FAILED;
  444. if (!strcmp(evt_name, "*")) {
  445. *strp = evt_name + evt_length;
  446. return parse_subsystem_tracepoint_event(sys_name, flags);
  447. } else
  448. return parse_single_tracepoint_event(sys_name, evt_name,
  449. evt_length, flags,
  450. attr, strp);
  451. }
  452. static int check_events(const char *str, unsigned int i)
  453. {
  454. int n;
  455. n = strlen(event_symbols[i].symbol);
  456. if (!strncmp(str, event_symbols[i].symbol, n))
  457. return n;
  458. n = strlen(event_symbols[i].alias);
  459. if (n)
  460. if (!strncmp(str, event_symbols[i].alias, n))
  461. return n;
  462. return 0;
  463. }
  464. static enum event_result
  465. parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
  466. {
  467. const char *str = *strp;
  468. unsigned int i;
  469. int n;
  470. for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
  471. n = check_events(str, i);
  472. if (n > 0) {
  473. attr->type = event_symbols[i].type;
  474. attr->config = event_symbols[i].config;
  475. *strp = str + n;
  476. return EVT_HANDLED;
  477. }
  478. }
  479. return EVT_FAILED;
  480. }
  481. static enum event_result
  482. parse_raw_event(const char **strp, struct perf_counter_attr *attr)
  483. {
  484. const char *str = *strp;
  485. u64 config;
  486. int n;
  487. if (*str != 'r')
  488. return EVT_FAILED;
  489. n = hex2u64(str + 1, &config);
  490. if (n > 0) {
  491. *strp = str + n + 1;
  492. attr->type = PERF_TYPE_RAW;
  493. attr->config = config;
  494. return EVT_HANDLED;
  495. }
  496. return EVT_FAILED;
  497. }
  498. static enum event_result
  499. parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
  500. {
  501. const char *str = *strp;
  502. char *endp;
  503. unsigned long type;
  504. u64 config;
  505. type = strtoul(str, &endp, 0);
  506. if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
  507. str = endp + 1;
  508. config = strtoul(str, &endp, 0);
  509. if (endp > str) {
  510. attr->type = type;
  511. attr->config = config;
  512. *strp = endp;
  513. return EVT_HANDLED;
  514. }
  515. }
  516. return EVT_FAILED;
  517. }
  518. static enum event_result
  519. parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
  520. {
  521. const char *str = *strp;
  522. int eu = 1, ek = 1, eh = 1;
  523. if (*str++ != ':')
  524. return 0;
  525. while (*str) {
  526. if (*str == 'u')
  527. eu = 0;
  528. else if (*str == 'k')
  529. ek = 0;
  530. else if (*str == 'h')
  531. eh = 0;
  532. else
  533. break;
  534. ++str;
  535. }
  536. if (str >= *strp + 2) {
  537. *strp = str;
  538. attr->exclude_user = eu;
  539. attr->exclude_kernel = ek;
  540. attr->exclude_hv = eh;
  541. return 1;
  542. }
  543. return 0;
  544. }
  545. /*
  546. * Each event can have multiple symbolic names.
  547. * Symbolic names are (almost) exactly matched.
  548. */
  549. static enum event_result
  550. parse_event_symbols(const char **str, struct perf_counter_attr *attr)
  551. {
  552. enum event_result ret;
  553. ret = parse_tracepoint_event(str, attr);
  554. if (ret != EVT_FAILED)
  555. goto modifier;
  556. ret = parse_raw_event(str, attr);
  557. if (ret != EVT_FAILED)
  558. goto modifier;
  559. ret = parse_numeric_event(str, attr);
  560. if (ret != EVT_FAILED)
  561. goto modifier;
  562. ret = parse_symbolic_event(str, attr);
  563. if (ret != EVT_FAILED)
  564. goto modifier;
  565. ret = parse_generic_hw_event(str, attr);
  566. if (ret != EVT_FAILED)
  567. goto modifier;
  568. return EVT_FAILED;
  569. modifier:
  570. parse_event_modifier(str, attr);
  571. return ret;
  572. }
  573. int parse_events(const struct option *opt __used, const char *str, int unset __used)
  574. {
  575. struct perf_counter_attr attr;
  576. enum event_result ret;
  577. for (;;) {
  578. if (nr_counters == MAX_COUNTERS)
  579. return -1;
  580. memset(&attr, 0, sizeof(attr));
  581. ret = parse_event_symbols(&str, &attr);
  582. if (ret == EVT_FAILED)
  583. return -1;
  584. if (!(*str == 0 || *str == ',' || isspace(*str)))
  585. return -1;
  586. if (ret != EVT_HANDLED_ALL) {
  587. attrs[nr_counters] = attr;
  588. nr_counters++;
  589. }
  590. if (*str == 0)
  591. break;
  592. if (*str == ',')
  593. ++str;
  594. while (isspace(*str))
  595. ++str;
  596. }
  597. return 0;
  598. }
/* Indexed by (perf event type + 1); slot 0 is the fallback label for
 * unknown types. */
static const char * const event_type_descriptors[] = {
	"",
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};
  606. /*
  607. * Print the events from <debugfs_mount_point>/tracing/events
  608. */
  609. static void print_tracepoint_events(void)
  610. {
  611. DIR *sys_dir, *evt_dir;
  612. struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
  613. int sys_dir_fd;
  614. char evt_path[MAXPATHLEN];
  615. if (valid_debugfs_mount(debugfs_path))
  616. return;
  617. sys_dir = opendir(debugfs_path);
  618. if (!sys_dir)
  619. goto cleanup;
  620. sys_dir_fd = dirfd(sys_dir);
  621. for_each_subsystem(sys_dir, sys_dirent, sys_next) {
  622. int dfd = openat(sys_dir_fd, sys_dirent.d_name,
  623. O_RDONLY|O_DIRECTORY), evt_dir_fd;
  624. if (dfd == -1)
  625. continue;
  626. evt_dir = fdopendir(dfd);
  627. if (!evt_dir) {
  628. close(dfd);
  629. continue;
  630. }
  631. evt_dir_fd = dirfd(evt_dir);
  632. for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
  633. snprintf(evt_path, MAXPATHLEN, "%s:%s",
  634. sys_dirent.d_name, evt_dirent.d_name);
  635. fprintf(stderr, " %-42s [%s]\n", evt_path,
  636. event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
  637. }
  638. closedir(evt_dir);
  639. }
  640. cleanup:
  641. closedir(sys_dir);
  642. }
/*
 * Print the help text for the event symbols:
 * lists the symbolic events (grouped by type), every valid generic
 * cache event combination, the raw descriptor form, and all
 * tracepoints, then exits with the usage status.
 */
void print_events(void)
{
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	char name[40];

	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		/* index into event_type_descriptors[]; 0 = unknown type */
		type = syms->type + 1;
		if (type >= ARRAY_SIZE(event_type_descriptors))
			type = 0;

		/* blank line between groups of differing type */
		if (type != prev_type)
			fprintf(stderr, "\n");

		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
		else
			strcpy(name, syms->symbol);
		fprintf(stderr, " %-42s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
	}

	fprintf(stderr, "\n");
	/* enumerate every valid <cache>-<op>-<result> combination */
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
				continue;
			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, " %-42s [%s]\n",
					event_cache_name(type, op, i),
					event_type_descriptors[4]);
			}
		}
	}

	fprintf(stderr, "\n");
	fprintf(stderr, " %-42s [raw hardware event descriptor]\n",
		"rNNN");
	fprintf(stderr, "\n");

	print_tracepoint_events();

	exit(129);
}