parse-events.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827
  1. #include "util.h"
  2. #include "../perf.h"
  3. #include "parse-options.h"
  4. #include "parse-events.h"
  5. #include "exec_cmd.h"
  6. #include "string.h"
  7. #include "cache.h"
/* Number of counters parsed so far and their attribute configurations;
 * filled in by parse_events(), read by event_name(). */
int				nr_counters;

struct perf_counter_attr	attrs[MAX_COUNTERS];
/*
 * One symbolic event name: the perf type/config pair it maps to, the
 * canonical symbol, and an optional short alias ("" when there is none).
 */
struct event_symbol {
	u8		type;		/* PERF_TYPE_* */
	u64		config;		/* PERF_COUNT_* within that type */
	const char	*symbol;	/* canonical name, e.g. "cpu-cycles" */
	const char	*alias;		/* short alias, e.g. "cycles", or "" */
};
/*
 * Result of one parse attempt.  EVT_HANDLED_ALL means a whole subsystem
 * of events was expanded and added already (wildcard tracepoints), so the
 * caller must not add another counter itself.
 */
enum event_result {
	EVT_FAILED,
	EVT_HANDLED,
	EVT_HANDLED_ALL
};
/* Mount point of the tracing events debugfs tree (set elsewhere). */
char debugfs_path[MAXPATHLEN];

/* Shorthand initializers for hardware / software event table entries. */
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
/*
 * Table of symbolic event names matched by parse_symbolic_event().
 * An empty string in the alias column means the event has no alias.
 */
static struct event_symbol event_symbols[] = {
  { CHW(CPU_CYCLES),		"cpu-cycles",		"cycles"	},
  { CHW(INSTRUCTIONS),		"instructions",		""		},
  { CHW(CACHE_REFERENCES),	"cache-references",	""		},
  { CHW(CACHE_MISSES),		"cache-misses",		""		},
  { CHW(BRANCH_INSTRUCTIONS),	"branch-instructions",	"branches"	},
  { CHW(BRANCH_MISSES),		"branch-misses",	""		},
  { CHW(BUS_CYCLES),		"bus-cycles",		""		},

  { CSW(CPU_CLOCK),		"cpu-clock",		""		},
  { CSW(TASK_CLOCK),		"task-clock",		""		},
  { CSW(PAGE_FAULTS),		"page-faults",		"faults"	},
  { CSW(PAGE_FAULTS_MIN),	"minor-faults",		""		},
  { CSW(PAGE_FAULTS_MAJ),	"major-faults",		""		},
  { CSW(CONTEXT_SWITCHES),	"context-switches",	"cs"		},
  { CSW(CPU_MIGRATIONS),	"cpu-migrations",	"migrations"	},
};
/* Extract a named bitfield from a raw config value using the
 * PERF_COUNTER_<name>_MASK / _SHIFT pairs. */
#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
/* Display names indexed by PERF_COUNT_HW_* (see __event_name()). */
static const char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};
/* Display names indexed by PERF_COUNT_SW_* (see __event_name()). */
static const char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};
#define MAX_ALIASES 8

/* Accepted spellings for each cache type / operation / result, indexed by
 * PERF_COUNT_HW_CACHE_*; entry [x][0] is the canonical name used for
 * output, the rest are input aliases (parse_aliases()). */
static const char *hw_cache[][MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2"							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
};

static const char *hw_cache_op[][MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

static const char *hw_cache_result[][MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
  82. #define C(x) PERF_COUNT_HW_CACHE_##x
  83. #define CACHE_READ (1 << C(OP_READ))
  84. #define CACHE_WRITE (1 << C(OP_WRITE))
  85. #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
  86. #define COP(x) (1 << x)
  87. /*
  88. * cache operartion stat
  89. * L1I : Read and prefetch only
  90. * ITLB and BPU : Read-only
  91. */
  92. static unsigned long hw_cache_stat[C(MAX)] = {
  93. [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  94. [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
  95. [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  96. [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  97. [C(ITLB)] = (CACHE_READ),
  98. [C(BPU)] = (CACHE_READ),
  99. };
/*
 * Iterate over the subsystem directories of an open DIR stream.
 * Visits only directory entries (DT_DIR), skipping "." and "..";
 * readdir_r() fills the caller-provided sys_dirent on each step.
 */
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)	       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
	if (sys_dirent.d_type == DT_DIR &&				       \
		(strcmp(sys_dirent.d_name, ".")) &&			       \
		(strcmp(sys_dirent.d_name, "..")))
  105. static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
  106. {
  107. char evt_path[MAXPATHLEN];
  108. int fd;
  109. snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
  110. sys_dir->d_name, evt_dir->d_name);
  111. fd = open(evt_path, O_RDONLY);
  112. if (fd < 0)
  113. return -EINVAL;
  114. close(fd);
  115. return 0;
  116. }
/*
 * Iterate over the event directories of one subsystem.  Like
 * for_each_subsystem(), but additionally skips entries that do not
 * provide an "id" file (see tp_event_has_id()).
 */
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	       \
	if (evt_dirent.d_type == DT_DIR &&				       \
		(strcmp(evt_dirent.d_name, ".")) &&			       \
		(strcmp(evt_dirent.d_name, "..")) &&			       \
		(!tp_event_has_id(&sys_dirent, &evt_dirent)))

/* Maximum length of a subsystem or event name. */
#define MAX_EVENT_LENGTH 512
  124. int valid_debugfs_mount(const char *debugfs)
  125. {
  126. struct statfs st_fs;
  127. if (statfs(debugfs, &st_fs) < 0)
  128. return -ENOENT;
  129. else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
  130. return -ENOENT;
  131. return 0;
  132. }
  133. struct tracepoint_path *tracepoint_id_to_path(u64 config)
  134. {
  135. struct tracepoint_path *path = NULL;
  136. DIR *sys_dir, *evt_dir;
  137. struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
  138. char id_buf[4];
  139. int sys_dir_fd, fd;
  140. u64 id;
  141. char evt_path[MAXPATHLEN];
  142. if (valid_debugfs_mount(debugfs_path))
  143. return NULL;
  144. sys_dir = opendir(debugfs_path);
  145. if (!sys_dir)
  146. goto cleanup;
  147. sys_dir_fd = dirfd(sys_dir);
  148. for_each_subsystem(sys_dir, sys_dirent, sys_next) {
  149. int dfd = openat(sys_dir_fd, sys_dirent.d_name,
  150. O_RDONLY|O_DIRECTORY), evt_dir_fd;
  151. if (dfd == -1)
  152. continue;
  153. evt_dir = fdopendir(dfd);
  154. if (!evt_dir) {
  155. close(dfd);
  156. continue;
  157. }
  158. evt_dir_fd = dirfd(evt_dir);
  159. for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
  160. snprintf(evt_path, MAXPATHLEN, "%s/id",
  161. evt_dirent.d_name);
  162. fd = openat(evt_dir_fd, evt_path, O_RDONLY);
  163. if (fd < 0)
  164. continue;
  165. if (read(fd, id_buf, sizeof(id_buf)) < 0) {
  166. close(fd);
  167. continue;
  168. }
  169. close(fd);
  170. id = atoll(id_buf);
  171. if (id == config) {
  172. closedir(evt_dir);
  173. closedir(sys_dir);
  174. path = calloc(1, sizeof(path));
  175. path->system = malloc(MAX_EVENT_LENGTH);
  176. if (!path->system) {
  177. free(path);
  178. return NULL;
  179. }
  180. path->name = malloc(MAX_EVENT_LENGTH);
  181. if (!path->name) {
  182. free(path->system);
  183. free(path);
  184. return NULL;
  185. }
  186. strncpy(path->system, sys_dirent.d_name,
  187. MAX_EVENT_LENGTH);
  188. strncpy(path->name, evt_dirent.d_name,
  189. MAX_EVENT_LENGTH);
  190. return path;
  191. }
  192. }
  193. closedir(evt_dir);
  194. }
  195. cleanup:
  196. closedir(sys_dir);
  197. return NULL;
  198. }
  199. #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
  200. static const char *tracepoint_id_to_name(u64 config)
  201. {
  202. static char buf[TP_PATH_LEN];
  203. struct tracepoint_path *path;
  204. path = tracepoint_id_to_path(config);
  205. if (path) {
  206. snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
  207. free(path->name);
  208. free(path->system);
  209. free(path);
  210. } else
  211. snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
  212. return buf;
  213. }
  214. static int is_cache_op_valid(u8 cache_type, u8 cache_op)
  215. {
  216. if (hw_cache_stat[cache_type] & COP(cache_op))
  217. return 1; /* valid */
  218. else
  219. return 0; /* invalid */
  220. }
  221. static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
  222. {
  223. static char name[50];
  224. if (cache_result) {
  225. sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
  226. hw_cache_op[cache_op][0],
  227. hw_cache_result[cache_result][0]);
  228. } else {
  229. sprintf(name, "%s-%s", hw_cache[cache_type][0],
  230. hw_cache_op[cache_op][1]);
  231. }
  232. return name;
  233. }
  234. const char *event_name(int counter)
  235. {
  236. u64 config = attrs[counter].config;
  237. int type = attrs[counter].type;
  238. return __event_name(type, config);
  239. }
  240. const char *__event_name(int type, u64 config)
  241. {
  242. static char buf[32];
  243. if (type == PERF_TYPE_RAW) {
  244. sprintf(buf, "raw 0x%llx", config);
  245. return buf;
  246. }
  247. switch (type) {
  248. case PERF_TYPE_HARDWARE:
  249. if (config < PERF_COUNT_HW_MAX)
  250. return hw_event_names[config];
  251. return "unknown-hardware";
  252. case PERF_TYPE_HW_CACHE: {
  253. u8 cache_type, cache_op, cache_result;
  254. cache_type = (config >> 0) & 0xff;
  255. if (cache_type > PERF_COUNT_HW_CACHE_MAX)
  256. return "unknown-ext-hardware-cache-type";
  257. cache_op = (config >> 8) & 0xff;
  258. if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
  259. return "unknown-ext-hardware-cache-op";
  260. cache_result = (config >> 16) & 0xff;
  261. if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
  262. return "unknown-ext-hardware-cache-result";
  263. if (!is_cache_op_valid(cache_type, cache_op))
  264. return "invalid-cache";
  265. return event_cache_name(cache_type, cache_op, cache_result);
  266. }
  267. case PERF_TYPE_SOFTWARE:
  268. if (config < PERF_COUNT_SW_MAX)
  269. return sw_event_names[config];
  270. return "unknown-software";
  271. case PERF_TYPE_TRACEPOINT:
  272. return tracepoint_id_to_name(config);
  273. default:
  274. break;
  275. }
  276. return "unknown";
  277. }
  278. static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
  279. {
  280. int i, j;
  281. int n, longest = -1;
  282. for (i = 0; i < size; i++) {
  283. for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
  284. n = strlen(names[i][j]);
  285. if (n > longest && !strncasecmp(*str, names[i][j], n))
  286. longest = n;
  287. }
  288. if (longest > 0) {
  289. *str += longest;
  290. return i;
  291. }
  292. }
  293. return -1;
  294. }
  295. static enum event_result
  296. parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
  297. {
  298. const char *s = *str;
  299. int cache_type = -1, cache_op = -1, cache_result = -1;
  300. cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
  301. /*
  302. * No fallback - if we cannot get a clear cache type
  303. * then bail out:
  304. */
  305. if (cache_type == -1)
  306. return EVT_FAILED;
  307. while ((cache_op == -1 || cache_result == -1) && *s == '-') {
  308. ++s;
  309. if (cache_op == -1) {
  310. cache_op = parse_aliases(&s, hw_cache_op,
  311. PERF_COUNT_HW_CACHE_OP_MAX);
  312. if (cache_op >= 0) {
  313. if (!is_cache_op_valid(cache_type, cache_op))
  314. return 0;
  315. continue;
  316. }
  317. }
  318. if (cache_result == -1) {
  319. cache_result = parse_aliases(&s, hw_cache_result,
  320. PERF_COUNT_HW_CACHE_RESULT_MAX);
  321. if (cache_result >= 0)
  322. continue;
  323. }
  324. /*
  325. * Can't parse this as a cache op or result, so back up
  326. * to the '-'.
  327. */
  328. --s;
  329. break;
  330. }
  331. /*
  332. * Fall back to reads:
  333. */
  334. if (cache_op == -1)
  335. cache_op = PERF_COUNT_HW_CACHE_OP_READ;
  336. /*
  337. * Fall back to accesses:
  338. */
  339. if (cache_result == -1)
  340. cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
  341. attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
  342. attr->type = PERF_TYPE_HW_CACHE;
  343. *str = s;
  344. return EVT_HANDLED;
  345. }
  346. static enum event_result
  347. parse_single_tracepoint_event(char *sys_name,
  348. const char *evt_name,
  349. unsigned int evt_length,
  350. char *flags,
  351. struct perf_counter_attr *attr,
  352. const char **strp)
  353. {
  354. char evt_path[MAXPATHLEN];
  355. char id_buf[4];
  356. u64 id;
  357. int fd;
  358. if (flags) {
  359. if (!strncmp(flags, "record", strlen(flags))) {
  360. attr->sample_type |= PERF_SAMPLE_RAW;
  361. attr->sample_type |= PERF_SAMPLE_TIME;
  362. attr->sample_type |= PERF_SAMPLE_CPU;
  363. }
  364. }
  365. snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
  366. sys_name, evt_name);
  367. fd = open(evt_path, O_RDONLY);
  368. if (fd < 0)
  369. return EVT_FAILED;
  370. if (read(fd, id_buf, sizeof(id_buf)) < 0) {
  371. close(fd);
  372. return EVT_FAILED;
  373. }
  374. close(fd);
  375. id = atoll(id_buf);
  376. attr->config = id;
  377. attr->type = PERF_TYPE_TRACEPOINT;
  378. *strp = evt_name + evt_length;
  379. return EVT_HANDLED;
  380. }
  381. /* sys + ':' + event + ':' + flags*/
  382. #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
  383. static enum event_result
  384. parse_subsystem_tracepoint_event(char *sys_name, char *flags)
  385. {
  386. char evt_path[MAXPATHLEN];
  387. struct dirent *evt_ent;
  388. DIR *evt_dir;
  389. snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
  390. evt_dir = opendir(evt_path);
  391. if (!evt_dir) {
  392. perror("Can't open event dir");
  393. return EVT_FAILED;
  394. }
  395. while ((evt_ent = readdir(evt_dir))) {
  396. char event_opt[MAX_EVOPT_LEN + 1];
  397. int len;
  398. unsigned int rem = MAX_EVOPT_LEN;
  399. if (!strcmp(evt_ent->d_name, ".")
  400. || !strcmp(evt_ent->d_name, "..")
  401. || !strcmp(evt_ent->d_name, "enable")
  402. || !strcmp(evt_ent->d_name, "filter"))
  403. continue;
  404. len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
  405. evt_ent->d_name);
  406. if (len < 0)
  407. return EVT_FAILED;
  408. rem -= len;
  409. if (flags) {
  410. if (rem < strlen(flags) + 1)
  411. return EVT_FAILED;
  412. strcat(event_opt, ":");
  413. strcat(event_opt, flags);
  414. }
  415. if (parse_events(NULL, event_opt, 0))
  416. return EVT_FAILED;
  417. }
  418. return EVT_HANDLED_ALL;
  419. }
  420. static enum event_result parse_tracepoint_event(const char **strp,
  421. struct perf_counter_attr *attr)
  422. {
  423. const char *evt_name;
  424. char *flags;
  425. char sys_name[MAX_EVENT_LENGTH];
  426. unsigned int sys_length, evt_length;
  427. if (valid_debugfs_mount(debugfs_path))
  428. return 0;
  429. evt_name = strchr(*strp, ':');
  430. if (!evt_name)
  431. return EVT_FAILED;
  432. sys_length = evt_name - *strp;
  433. if (sys_length >= MAX_EVENT_LENGTH)
  434. return 0;
  435. strncpy(sys_name, *strp, sys_length);
  436. sys_name[sys_length] = '\0';
  437. evt_name = evt_name + 1;
  438. flags = strchr(evt_name, ':');
  439. if (flags) {
  440. /* split it out: */
  441. evt_name = strndup(evt_name, flags - evt_name);
  442. flags++;
  443. }
  444. evt_length = strlen(evt_name);
  445. if (evt_length >= MAX_EVENT_LENGTH)
  446. return EVT_FAILED;
  447. if (!strcmp(evt_name, "*")) {
  448. *strp = evt_name + evt_length;
  449. return parse_subsystem_tracepoint_event(sys_name, flags);
  450. } else
  451. return parse_single_tracepoint_event(sys_name, evt_name,
  452. evt_length, flags,
  453. attr, strp);
  454. }
  455. static int check_events(const char *str, unsigned int i)
  456. {
  457. int n;
  458. n = strlen(event_symbols[i].symbol);
  459. if (!strncmp(str, event_symbols[i].symbol, n))
  460. return n;
  461. n = strlen(event_symbols[i].alias);
  462. if (n)
  463. if (!strncmp(str, event_symbols[i].alias, n))
  464. return n;
  465. return 0;
  466. }
  467. static enum event_result
  468. parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
  469. {
  470. const char *str = *strp;
  471. unsigned int i;
  472. int n;
  473. for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
  474. n = check_events(str, i);
  475. if (n > 0) {
  476. attr->type = event_symbols[i].type;
  477. attr->config = event_symbols[i].config;
  478. *strp = str + n;
  479. return EVT_HANDLED;
  480. }
  481. }
  482. return EVT_FAILED;
  483. }
  484. static enum event_result
  485. parse_raw_event(const char **strp, struct perf_counter_attr *attr)
  486. {
  487. const char *str = *strp;
  488. u64 config;
  489. int n;
  490. if (*str != 'r')
  491. return EVT_FAILED;
  492. n = hex2u64(str + 1, &config);
  493. if (n > 0) {
  494. *strp = str + n + 1;
  495. attr->type = PERF_TYPE_RAW;
  496. attr->config = config;
  497. return EVT_HANDLED;
  498. }
  499. return EVT_FAILED;
  500. }
  501. static enum event_result
  502. parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
  503. {
  504. const char *str = *strp;
  505. char *endp;
  506. unsigned long type;
  507. u64 config;
  508. type = strtoul(str, &endp, 0);
  509. if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
  510. str = endp + 1;
  511. config = strtoul(str, &endp, 0);
  512. if (endp > str) {
  513. attr->type = type;
  514. attr->config = config;
  515. *strp = endp;
  516. return EVT_HANDLED;
  517. }
  518. }
  519. return EVT_FAILED;
  520. }
  521. static enum event_result
  522. parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
  523. {
  524. const char *str = *strp;
  525. int eu = 1, ek = 1, eh = 1;
  526. if (*str++ != ':')
  527. return 0;
  528. while (*str) {
  529. if (*str == 'u')
  530. eu = 0;
  531. else if (*str == 'k')
  532. ek = 0;
  533. else if (*str == 'h')
  534. eh = 0;
  535. else
  536. break;
  537. ++str;
  538. }
  539. if (str >= *strp + 2) {
  540. *strp = str;
  541. attr->exclude_user = eu;
  542. attr->exclude_kernel = ek;
  543. attr->exclude_hv = eh;
  544. return 1;
  545. }
  546. return 0;
  547. }
  548. /*
  549. * Each event can have multiple symbolic names.
  550. * Symbolic names are (almost) exactly matched.
  551. */
  552. static enum event_result
  553. parse_event_symbols(const char **str, struct perf_counter_attr *attr)
  554. {
  555. enum event_result ret;
  556. ret = parse_tracepoint_event(str, attr);
  557. if (ret != EVT_FAILED)
  558. goto modifier;
  559. ret = parse_raw_event(str, attr);
  560. if (ret != EVT_FAILED)
  561. goto modifier;
  562. ret = parse_numeric_event(str, attr);
  563. if (ret != EVT_FAILED)
  564. goto modifier;
  565. ret = parse_symbolic_event(str, attr);
  566. if (ret != EVT_FAILED)
  567. goto modifier;
  568. ret = parse_generic_hw_event(str, attr);
  569. if (ret != EVT_FAILED)
  570. goto modifier;
  571. return EVT_FAILED;
  572. modifier:
  573. parse_event_modifier(str, attr);
  574. return ret;
  575. }
  576. int parse_events(const struct option *opt __used, const char *str, int unset __used)
  577. {
  578. struct perf_counter_attr attr;
  579. enum event_result ret;
  580. for (;;) {
  581. if (nr_counters == MAX_COUNTERS)
  582. return -1;
  583. memset(&attr, 0, sizeof(attr));
  584. ret = parse_event_symbols(&str, &attr);
  585. if (ret == EVT_FAILED)
  586. return -1;
  587. if (!(*str == 0 || *str == ',' || isspace(*str)))
  588. return -1;
  589. if (ret != EVT_HANDLED_ALL) {
  590. attrs[nr_counters] = attr;
  591. nr_counters++;
  592. }
  593. if (*str == 0)
  594. break;
  595. if (*str == ',')
  596. ++str;
  597. while (isspace(*str))
  598. ++str;
  599. }
  600. return 0;
  601. }
/* Human-readable category labels, indexed by PERF_TYPE_* + 1
 * (index 0 is the "unknown" catch-all used by print_events()). */
static const char * const event_type_descriptors[] = {
	"",
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};
  609. /*
  610. * Print the events from <debugfs_mount_point>/tracing/events
  611. */
  612. static void print_tracepoint_events(void)
  613. {
  614. DIR *sys_dir, *evt_dir;
  615. struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
  616. int sys_dir_fd;
  617. char evt_path[MAXPATHLEN];
  618. if (valid_debugfs_mount(debugfs_path))
  619. return;
  620. sys_dir = opendir(debugfs_path);
  621. if (!sys_dir)
  622. goto cleanup;
  623. sys_dir_fd = dirfd(sys_dir);
  624. for_each_subsystem(sys_dir, sys_dirent, sys_next) {
  625. int dfd = openat(sys_dir_fd, sys_dirent.d_name,
  626. O_RDONLY|O_DIRECTORY), evt_dir_fd;
  627. if (dfd == -1)
  628. continue;
  629. evt_dir = fdopendir(dfd);
  630. if (!evt_dir) {
  631. close(dfd);
  632. continue;
  633. }
  634. evt_dir_fd = dirfd(evt_dir);
  635. for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
  636. snprintf(evt_path, MAXPATHLEN, "%s:%s",
  637. sys_dirent.d_name, evt_dirent.d_name);
  638. fprintf(stderr, " %-42s [%s]\n", evt_path,
  639. event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
  640. }
  641. closedir(evt_dir);
  642. }
  643. cleanup:
  644. closedir(sys_dir);
  645. }
  646. /*
  647. * Print the help text for the event symbols:
  648. */
  649. void print_events(void)
  650. {
  651. struct event_symbol *syms = event_symbols;
  652. unsigned int i, type, op, prev_type = -1;
  653. char name[40];
  654. fprintf(stderr, "\n");
  655. fprintf(stderr, "List of pre-defined events (to be used in -e):\n");
  656. for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
  657. type = syms->type + 1;
  658. if (type >= ARRAY_SIZE(event_type_descriptors))
  659. type = 0;
  660. if (type != prev_type)
  661. fprintf(stderr, "\n");
  662. if (strlen(syms->alias))
  663. sprintf(name, "%s OR %s", syms->symbol, syms->alias);
  664. else
  665. strcpy(name, syms->symbol);
  666. fprintf(stderr, " %-42s [%s]\n", name,
  667. event_type_descriptors[type]);
  668. prev_type = type;
  669. }
  670. fprintf(stderr, "\n");
  671. for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
  672. for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
  673. /* skip invalid cache type */
  674. if (!is_cache_op_valid(type, op))
  675. continue;
  676. for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
  677. fprintf(stderr, " %-42s [%s]\n",
  678. event_cache_name(type, op, i),
  679. event_type_descriptors[4]);
  680. }
  681. }
  682. }
  683. fprintf(stderr, "\n");
  684. fprintf(stderr, " %-42s [raw hardware event descriptor]\n",
  685. "rNNN");
  686. fprintf(stderr, "\n");
  687. print_tracepoint_events();
  688. exit(129);
  689. }