/*
 * parse-events.c - parsing of perf event descriptors (-e option)
 */
  1. #include "../../../include/linux/hw_breakpoint.h"
  2. #include "util.h"
  3. #include "../perf.h"
  4. #include "evlist.h"
  5. #include "evsel.h"
  6. #include "parse-options.h"
  7. #include "parse-events.h"
  8. #include "exec_cmd.h"
  9. #include "string.h"
  10. #include "symbol.h"
  11. #include "cache.h"
  12. #include "header.h"
  13. #include "debugfs.h"
  14. #include "parse-events-flex.h"
  15. #include "pmu.h"
#define MAX_NAME_LEN 100

/*
 * Table entry mapping a symbolic event name (and an optional alias,
 * "" when none) to a perf_event_attr type/config pair.
 */
struct event_symbol {
	u8		type;
	u64		config;
	const char	*symbol;
	const char	*alias;
};

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif

/* Entry point of the generated event parser (see parse-events-flex.h). */
int parse_events_parse(struct list_head *list, int *idx);

/* Shorthand initializers for hardware (CHW) and software (CSW) events. */
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
/*
 * Symbolic names for the generic hardware and software events.
 * Scanned linearly; order only affects listing output.
 */
static struct event_symbol event_symbols[] = {
	{ CHW(CPU_CYCLES),		"cpu-cycles",		"cycles"		},
	{ CHW(STALLED_CYCLES_FRONTEND),	"stalled-cycles-frontend", "idle-cycles-frontend" },
	{ CHW(STALLED_CYCLES_BACKEND),	"stalled-cycles-backend", "idle-cycles-backend"	},
	{ CHW(INSTRUCTIONS),		"instructions",		""			},
	{ CHW(CACHE_REFERENCES),	"cache-references",	""			},
	{ CHW(CACHE_MISSES),		"cache-misses",		""			},
	{ CHW(BRANCH_INSTRUCTIONS),	"branch-instructions",	"branches"		},
	{ CHW(BRANCH_MISSES),		"branch-misses",	""			},
	{ CHW(BUS_CYCLES),		"bus-cycles",		""			},
	{ CHW(REF_CPU_CYCLES),		"ref-cycles",		""			},
	{ CSW(CPU_CLOCK),		"cpu-clock",		""			},
	{ CSW(TASK_CLOCK),		"task-clock",		""			},
	{ CSW(PAGE_FAULTS),		"page-faults",		"faults"		},
	{ CSW(PAGE_FAULTS_MIN),		"minor-faults",		""			},
	{ CSW(PAGE_FAULTS_MAJ),		"major-faults",		""			},
	{ CSW(CONTEXT_SWITCHES),	"context-switches",	"cs"			},
	{ CSW(CPU_MIGRATIONS),		"cpu-migrations",	"migrations"		},
	{ CSW(ALIGNMENT_FAULTS),	"alignment-faults",	""			},
	{ CSW(EMULATION_FAULTS),	"emulation-faults",	""			},
};
/* Extract a bit field from a raw event config word. */
#define __PERF_EVENT_FIELD(config, name) \
	((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)

#define PERF_EVENT_RAW(config)		__PERF_EVENT_FIELD(config, RAW)
#define PERF_EVENT_CONFIG(config)	__PERF_EVENT_FIELD(config, CONFIG)
#define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)

/* Printable names for the software events, indexed by PERF_COUNT_SW_*. */
static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};
#define MAX_ALIASES 8

/* Accepted spellings for each cache, indexed by PERF_COUNT_HW_CACHE_*. */
static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",	"bpc",		},
	{ "node",								},
};

/* Accepted spellings for each cache operation. */
static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

/* Accepted spellings for each cache result. */
static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				  [MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * Per-cache bitmap of the operations that are valid for it:
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)] = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
/*
 * Iterate over the subsystem directories under @sys_dir, skipping
 * "." and "..".  Expands to a while/if pair, so the caller's braced
 * body is the loop body.
 */
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)	       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	\
	if (sys_dirent.d_type == DT_DIR &&			       \
	    (strcmp(sys_dirent.d_name, ".")) &&			       \
	    (strcmp(sys_dirent.d_name, "..")))
  111. static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
  112. {
  113. char evt_path[MAXPATHLEN];
  114. int fd;
  115. snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
  116. sys_dir->d_name, evt_dir->d_name);
  117. fd = open(evt_path, O_RDONLY);
  118. if (fd < 0)
  119. return -EINVAL;
  120. close(fd);
  121. return 0;
  122. }
/*
 * Iterate over the event directories under @evt_dir, skipping "." and
 * ".." and any entry that does not expose an "id" file (i.e. is not a
 * real tracepoint).
 */
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)      \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	\
	if (evt_dirent.d_type == DT_DIR &&			       \
	    (strcmp(evt_dirent.d_name, ".")) &&			       \
	    (strcmp(evt_dirent.d_name, "..")) &&		       \
	    (!tp_event_has_id(&sys_dirent, &evt_dirent)))

#define MAX_EVENT_LENGTH 512
  130. struct tracepoint_path *tracepoint_id_to_path(u64 config)
  131. {
  132. struct tracepoint_path *path = NULL;
  133. DIR *sys_dir, *evt_dir;
  134. struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
  135. char id_buf[24];
  136. int fd;
  137. u64 id;
  138. char evt_path[MAXPATHLEN];
  139. char dir_path[MAXPATHLEN];
  140. if (debugfs_valid_mountpoint(tracing_events_path))
  141. return NULL;
  142. sys_dir = opendir(tracing_events_path);
  143. if (!sys_dir)
  144. return NULL;
  145. for_each_subsystem(sys_dir, sys_dirent, sys_next) {
  146. snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
  147. sys_dirent.d_name);
  148. evt_dir = opendir(dir_path);
  149. if (!evt_dir)
  150. continue;
  151. for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
  152. snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
  153. evt_dirent.d_name);
  154. fd = open(evt_path, O_RDONLY);
  155. if (fd < 0)
  156. continue;
  157. if (read(fd, id_buf, sizeof(id_buf)) < 0) {
  158. close(fd);
  159. continue;
  160. }
  161. close(fd);
  162. id = atoll(id_buf);
  163. if (id == config) {
  164. closedir(evt_dir);
  165. closedir(sys_dir);
  166. path = zalloc(sizeof(*path));
  167. path->system = malloc(MAX_EVENT_LENGTH);
  168. if (!path->system) {
  169. free(path);
  170. return NULL;
  171. }
  172. path->name = malloc(MAX_EVENT_LENGTH);
  173. if (!path->name) {
  174. free(path->system);
  175. free(path);
  176. return NULL;
  177. }
  178. strncpy(path->system, sys_dirent.d_name,
  179. MAX_EVENT_LENGTH);
  180. strncpy(path->name, evt_dirent.d_name,
  181. MAX_EVENT_LENGTH);
  182. return path;
  183. }
  184. }
  185. closedir(evt_dir);
  186. }
  187. closedir(sys_dir);
  188. return NULL;
  189. }
  190. #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
  191. static const char *tracepoint_id_to_name(u64 config)
  192. {
  193. static char buf[TP_PATH_LEN];
  194. struct tracepoint_path *path;
  195. path = tracepoint_id_to_path(config);
  196. if (path) {
  197. snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
  198. free(path->name);
  199. free(path->system);
  200. free(path);
  201. } else
  202. snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
  203. return buf;
  204. }
  205. static int is_cache_op_valid(u8 cache_type, u8 cache_op)
  206. {
  207. if (hw_cache_stat[cache_type] & COP(cache_op))
  208. return 1; /* valid */
  209. else
  210. return 0; /* invalid */
  211. }
  212. static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
  213. {
  214. static char name[50];
  215. if (cache_result) {
  216. sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
  217. hw_cache_op[cache_op][0],
  218. hw_cache_result[cache_result][0]);
  219. } else {
  220. sprintf(name, "%s-%s", hw_cache[cache_type][0],
  221. hw_cache_op[cache_op][1]);
  222. }
  223. return name;
  224. }
  225. const char *event_type(int type)
  226. {
  227. switch (type) {
  228. case PERF_TYPE_HARDWARE:
  229. return "hardware";
  230. case PERF_TYPE_SOFTWARE:
  231. return "software";
  232. case PERF_TYPE_TRACEPOINT:
  233. return "tracepoint";
  234. case PERF_TYPE_HW_CACHE:
  235. return "hardware-cache";
  236. default:
  237. break;
  238. }
  239. return "unknown";
  240. }
/*
 * Return a printable name for @evsel: delegates raw/hardware events to
 * perf_evsel__name(), otherwise prefers an explicit evsel->name and
 * falls back to __event_name().  May return static storage.
 */
const char *event_name(struct perf_evsel *evsel)
{
	u64 config = evsel->attr.config;
	int type = evsel->attr.type;

	if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
		/*
		 * XXX minimal fix, see comment on perf_evsel__name, this static buffer
		 * will go away together with event_name in the next devel cycle.
		 */
		static char bf[128];

		perf_evsel__name(evsel, bf, sizeof(bf));
		return bf;
	}

	if (evsel->name)
		return evsel->name;

	return __event_name(type, config);
}
  258. const char *__event_name(int type, u64 config)
  259. {
  260. static char buf[32];
  261. if (type == PERF_TYPE_RAW) {
  262. sprintf(buf, "raw 0x%" PRIx64, config);
  263. return buf;
  264. }
  265. switch (type) {
  266. case PERF_TYPE_HARDWARE:
  267. return __perf_evsel__hw_name(config);
  268. case PERF_TYPE_HW_CACHE: {
  269. u8 cache_type, cache_op, cache_result;
  270. cache_type = (config >> 0) & 0xff;
  271. if (cache_type > PERF_COUNT_HW_CACHE_MAX)
  272. return "unknown-ext-hardware-cache-type";
  273. cache_op = (config >> 8) & 0xff;
  274. if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
  275. return "unknown-ext-hardware-cache-op";
  276. cache_result = (config >> 16) & 0xff;
  277. if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
  278. return "unknown-ext-hardware-cache-result";
  279. if (!is_cache_op_valid(cache_type, cache_op))
  280. return "invalid-cache";
  281. return event_cache_name(cache_type, cache_op, cache_result);
  282. }
  283. case PERF_TYPE_SOFTWARE:
  284. if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
  285. return sw_event_names[config];
  286. return "unknown-software";
  287. case PERF_TYPE_TRACEPOINT:
  288. return tracepoint_id_to_name(config);
  289. default:
  290. break;
  291. }
  292. return "unknown";
  293. }
  294. static int add_event(struct list_head **_list, int *idx,
  295. struct perf_event_attr *attr, char *name)
  296. {
  297. struct perf_evsel *evsel;
  298. struct list_head *list = *_list;
  299. if (!list) {
  300. list = malloc(sizeof(*list));
  301. if (!list)
  302. return -ENOMEM;
  303. INIT_LIST_HEAD(list);
  304. }
  305. event_attr_init(attr);
  306. evsel = perf_evsel__new(attr, (*idx)++);
  307. if (!evsel) {
  308. free(list);
  309. return -ENOMEM;
  310. }
  311. evsel->name = strdup(name);
  312. list_add_tail(&evsel->node, list);
  313. *_list = list;
  314. return 0;
  315. }
  316. static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
  317. {
  318. int i, j;
  319. int n, longest = -1;
  320. for (i = 0; i < size; i++) {
  321. for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
  322. n = strlen(names[i][j]);
  323. if (n > longest && !strncasecmp(str, names[i][j], n))
  324. longest = n;
  325. }
  326. if (longest > 0)
  327. return i;
  328. }
  329. return -1;
  330. }
  331. int parse_events_add_cache(struct list_head **list, int *idx,
  332. char *type, char *op_result1, char *op_result2)
  333. {
  334. struct perf_event_attr attr;
  335. char name[MAX_NAME_LEN];
  336. int cache_type = -1, cache_op = -1, cache_result = -1;
  337. char *op_result[2] = { op_result1, op_result2 };
  338. int i, n;
  339. /*
  340. * No fallback - if we cannot get a clear cache type
  341. * then bail out:
  342. */
  343. cache_type = parse_aliases(type, hw_cache,
  344. PERF_COUNT_HW_CACHE_MAX);
  345. if (cache_type == -1)
  346. return -EINVAL;
  347. n = snprintf(name, MAX_NAME_LEN, "%s", type);
  348. for (i = 0; (i < 2) && (op_result[i]); i++) {
  349. char *str = op_result[i];
  350. snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
  351. if (cache_op == -1) {
  352. cache_op = parse_aliases(str, hw_cache_op,
  353. PERF_COUNT_HW_CACHE_OP_MAX);
  354. if (cache_op >= 0) {
  355. if (!is_cache_op_valid(cache_type, cache_op))
  356. return -EINVAL;
  357. continue;
  358. }
  359. }
  360. if (cache_result == -1) {
  361. cache_result = parse_aliases(str, hw_cache_result,
  362. PERF_COUNT_HW_CACHE_RESULT_MAX);
  363. if (cache_result >= 0)
  364. continue;
  365. }
  366. }
  367. /*
  368. * Fall back to reads:
  369. */
  370. if (cache_op == -1)
  371. cache_op = PERF_COUNT_HW_CACHE_OP_READ;
  372. /*
  373. * Fall back to accesses:
  374. */
  375. if (cache_result == -1)
  376. cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
  377. memset(&attr, 0, sizeof(attr));
  378. attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
  379. attr.type = PERF_TYPE_HW_CACHE;
  380. return add_event(list, idx, &attr, name);
  381. }
  382. static int add_tracepoint(struct list_head **list, int *idx,
  383. char *sys_name, char *evt_name)
  384. {
  385. struct perf_event_attr attr;
  386. char name[MAX_NAME_LEN];
  387. char evt_path[MAXPATHLEN];
  388. char id_buf[4];
  389. u64 id;
  390. int fd;
  391. snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
  392. sys_name, evt_name);
  393. fd = open(evt_path, O_RDONLY);
  394. if (fd < 0)
  395. return -1;
  396. if (read(fd, id_buf, sizeof(id_buf)) < 0) {
  397. close(fd);
  398. return -1;
  399. }
  400. close(fd);
  401. id = atoll(id_buf);
  402. memset(&attr, 0, sizeof(attr));
  403. attr.config = id;
  404. attr.type = PERF_TYPE_TRACEPOINT;
  405. attr.sample_type |= PERF_SAMPLE_RAW;
  406. attr.sample_type |= PERF_SAMPLE_TIME;
  407. attr.sample_type |= PERF_SAMPLE_CPU;
  408. attr.sample_period = 1;
  409. snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
  410. return add_event(list, idx, &attr, name);
  411. }
  412. static int add_tracepoint_multi(struct list_head **list, int *idx,
  413. char *sys_name, char *evt_name)
  414. {
  415. char evt_path[MAXPATHLEN];
  416. struct dirent *evt_ent;
  417. DIR *evt_dir;
  418. int ret = 0;
  419. snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
  420. evt_dir = opendir(evt_path);
  421. if (!evt_dir) {
  422. perror("Can't open event dir");
  423. return -1;
  424. }
  425. while (!ret && (evt_ent = readdir(evt_dir))) {
  426. if (!strcmp(evt_ent->d_name, ".")
  427. || !strcmp(evt_ent->d_name, "..")
  428. || !strcmp(evt_ent->d_name, "enable")
  429. || !strcmp(evt_ent->d_name, "filter"))
  430. continue;
  431. if (!strglobmatch(evt_ent->d_name, evt_name))
  432. continue;
  433. ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
  434. }
  435. return ret;
  436. }
  437. int parse_events_add_tracepoint(struct list_head **list, int *idx,
  438. char *sys, char *event)
  439. {
  440. int ret;
  441. ret = debugfs_valid_mountpoint(tracing_events_path);
  442. if (ret)
  443. return ret;
  444. return strpbrk(event, "*?") ?
  445. add_tracepoint_multi(list, idx, sys, event) :
  446. add_tracepoint(list, idx, sys, event);
  447. }
  448. static int
  449. parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
  450. {
  451. int i;
  452. for (i = 0; i < 3; i++) {
  453. if (!type || !type[i])
  454. break;
  455. switch (type[i]) {
  456. case 'r':
  457. attr->bp_type |= HW_BREAKPOINT_R;
  458. break;
  459. case 'w':
  460. attr->bp_type |= HW_BREAKPOINT_W;
  461. break;
  462. case 'x':
  463. attr->bp_type |= HW_BREAKPOINT_X;
  464. break;
  465. default:
  466. return -EINVAL;
  467. }
  468. }
  469. if (!attr->bp_type) /* Default */
  470. attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
  471. return 0;
  472. }
  473. int parse_events_add_breakpoint(struct list_head **list, int *idx,
  474. void *ptr, char *type)
  475. {
  476. struct perf_event_attr attr;
  477. char name[MAX_NAME_LEN];
  478. memset(&attr, 0, sizeof(attr));
  479. attr.bp_addr = (unsigned long) ptr;
  480. if (parse_breakpoint_type(type, &attr))
  481. return -EINVAL;
  482. /*
  483. * We should find a nice way to override the access length
  484. * Provide some defaults for now
  485. */
  486. if (attr.bp_type == HW_BREAKPOINT_X)
  487. attr.bp_len = sizeof(long);
  488. else
  489. attr.bp_len = HW_BREAKPOINT_LEN_4;
  490. attr.type = PERF_TYPE_BREAKPOINT;
  491. snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
  492. return add_event(list, idx, &attr, name);
  493. }
/*
 * Apply one parsed event term to @attr.  A term carrying the wrong
 * value kind (number vs. string) or an unknown term type is rejected
 * with -EINVAL.
 */
static int config_term(struct perf_event_attr *attr,
		       struct parse_events__term *term)
{
/* Bail out unless the term's value kind matches what the case expects. */
#define CHECK_TYPE_VAL(type)					   \
do {								   \
	if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val)	   \
		return -EINVAL;					   \
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		attr->sample_period = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		/*
		 * TODO uncomment when the field is available
		 * attr->branch_sample_type = term->val.num;
		 */
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
#undef CHECK_TYPE_VAL
}
  534. static int config_attr(struct perf_event_attr *attr,
  535. struct list_head *head, int fail)
  536. {
  537. struct parse_events__term *term;
  538. list_for_each_entry(term, head, list)
  539. if (config_term(attr, term) && fail)
  540. return -EINVAL;
  541. return 0;
  542. }
  543. int parse_events_add_numeric(struct list_head **list, int *idx,
  544. unsigned long type, unsigned long config,
  545. struct list_head *head_config)
  546. {
  547. struct perf_event_attr attr;
  548. memset(&attr, 0, sizeof(attr));
  549. attr.type = type;
  550. attr.config = config;
  551. if (head_config &&
  552. config_attr(&attr, head_config, 1))
  553. return -EINVAL;
  554. return add_event(list, idx, &attr,
  555. (char *) __event_name(type, config));
  556. }
/* True when @term is a user-supplied "name=..." term. */
static int parse_events__is_name_term(struct parse_events__term *term)
{
	return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
}
  561. static char *pmu_event_name(struct perf_event_attr *attr,
  562. struct list_head *head_terms)
  563. {
  564. struct parse_events__term *term;
  565. list_for_each_entry(term, head_terms, list)
  566. if (parse_events__is_name_term(term))
  567. return term->val.str;
  568. return (char *) __event_name(PERF_TYPE_RAW, attr->config);
  569. }
  570. int parse_events_add_pmu(struct list_head **list, int *idx,
  571. char *name, struct list_head *head_config)
  572. {
  573. struct perf_event_attr attr;
  574. struct perf_pmu *pmu;
  575. pmu = perf_pmu__find(name);
  576. if (!pmu)
  577. return -EINVAL;
  578. memset(&attr, 0, sizeof(attr));
  579. /*
  580. * Configure hardcoded terms first, no need to check
  581. * return value when called with fail == 0 ;)
  582. */
  583. config_attr(&attr, head_config, 0);
  584. if (perf_pmu__config(pmu, &attr, head_config))
  585. return -EINVAL;
  586. return add_event(list, idx, &attr,
  587. pmu_event_name(&attr, head_config));
  588. }
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called for a single event definition.  Splice its evsels onto
	 * the 'all events' list and free the now-empty per-event list
	 * head (allocated by add_event()), ready for the next
	 * definition.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}
/*
 * Apply an event modifier string (e.g. "ukhpGH") to every evsel on
 * @list.  'u'/'k'/'h' select privilege levels (the first of them seen
 * switches to exclude-by-default, then un-excludes itself); 'G'/'H'
 * work the same way for guest/host; each 'p' increases precise_ip.
 * Returns 0 on success, -EINVAL for more than three 'p's.
 */
int parse_events_modifier(struct list_head *list, char *str)
{
	struct perf_evsel *evsel;
	int exclude = 0, exclude_GH = 0;
	int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;

	if (str == NULL)
		return 0;

	while (*str) {
		if (*str == 'u') {
			/* first u/k/h flips everything to excluded ... */
			if (!exclude)
				exclude = eu = ek = eh = 1;
			/* ... then this level is re-enabled */
			eu = 0;
		} else if (*str == 'k') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		} else if (*str == 'h') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		} else if (*str == 'G') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		} else if (*str == 'H') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		} else if (*str == 'p') {
			precise++;
		} else
			break;

		++str;
	}

	/*
	 * precise ip:
	 *
	 *  0 - SAMPLE_IP can have arbitrary skid
	 *  1 - SAMPLE_IP must have constant skid
	 *  2 - SAMPLE_IP requested to have 0 skid
	 *  3 - SAMPLE_IP must have 0 skid
	 *
	 *  See also PERF_RECORD_MISC_EXACT_IP
	 */
	if (precise > 3)
		return -EINVAL;

	list_for_each_entry(evsel, list, node) {
		evsel->attr.exclude_user   = eu;
		evsel->attr.exclude_kernel = ek;
		evsel->attr.exclude_hv     = eh;
		evsel->attr.precise_ip     = precise;
		evsel->attr.exclude_host   = eH;
		evsel->attr.exclude_guest  = eG;
	}

	return 0;
}
/*
 * Parse the -e event description string @str and append the resulting
 * evsels to @evlist.  Drives the flex/bison parser over a string
 * buffer.  Returns 0 on success; on failure prints a diagnostic and
 * returns the parser's error code.
 */
int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
{
	LIST_HEAD(list);
	LIST_HEAD(list_tmp);
	YY_BUFFER_STATE buffer;
	int ret, idx = evlist->nr_entries;

	buffer = parse_events__scan_string(str);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
#endif
	ret = parse_events_parse(&list, &idx);

	/* tear down the lexer state regardless of the parse result */
	parse_events__flush_buffer(buffer);
	parse_events__delete_buffer(buffer);
	parse_events_lex_destroy();

	if (!ret) {
		int entries = idx - evlist->nr_entries;
		perf_evlist__splice_list_tail(evlist, &list, entries);
		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call perf_evlist__delete in case of error, so we dont
	 * need to bother.
	 */
	fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
	fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	return ret;
}
/* parse-options callback for -e: forward to parse_events(). */
int parse_events_option(const struct option *opt, const char *str,
			int unset __used)
{
	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
	return parse_events(evlist, str, unset);
}
  690. int parse_filter(const struct option *opt, const char *str,
  691. int unset __used)
  692. {
  693. struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
  694. struct perf_evsel *last = NULL;
  695. if (evlist->nr_entries > 0)
  696. last = list_entry(evlist->entries.prev, struct perf_evsel, node);
  697. if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
  698. fprintf(stderr,
  699. "-F option should follow a -e tracepoint option\n");
  700. return -1;
  701. }
  702. last->filter = strdup(str);
  703. if (last->filter == NULL) {
  704. fprintf(stderr, "not enough memory to hold filter string\n");
  705. return -1;
  706. }
  707. return 0;
  708. }
/* Category labels for listings, indexed by PERF_TYPE_* value. */
static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw hardware event descriptor",
	"Hardware breakpoint",
};
/*
 * Print the events from <debugfs_mount_point>/tracing/events,
 * optionally filtered by glob patterns on the subsystem and event
 * names (NULL matches everything).
 */
void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (debugfs_valid_mountpoint(tracing_events_path))
		return;

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		if (subsys_glob != NULL &&
		    !strglobmatch(sys_dirent.d_name, subsys_glob))
			continue;

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			if (event_glob != NULL &&
			    !strglobmatch(evt_dirent.d_name, event_glob))
				continue;

			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			printf(" %-50s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT]);
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
}
/*
 * Check whether "sys:event" in @event_string names an existing
 * tracepoint under <debugfs_mount_point>/tracing/events.
 * Returns 1 when found, 0 otherwise.
 */
int is_valid_tracepoint(const char *event_string)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (debugfs_valid_mountpoint(tracing_events_path))
		return 0;

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return 0;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			if (!strcmp(evt_path, event_string)) {
				closedir(evt_dir);
				closedir(sys_dir);
				return 1;
			}
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
	return 0;
}
  787. void print_events_type(u8 type)
  788. {
  789. struct event_symbol *syms = event_symbols;
  790. unsigned int i;
  791. char name[64];
  792. for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
  793. if (type != syms->type)
  794. continue;
  795. if (strlen(syms->alias))
  796. snprintf(name, sizeof(name), "%s OR %s",
  797. syms->symbol, syms->alias);
  798. else
  799. snprintf(name, sizeof(name), "%s", syms->symbol);
  800. printf(" %-50s [%s]\n", name,
  801. event_type_descriptors[type]);
  802. }
  803. }
  804. int print_hwcache_events(const char *event_glob)
  805. {
  806. unsigned int type, op, i, printed = 0;
  807. for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
  808. for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
  809. /* skip invalid cache type */
  810. if (!is_cache_op_valid(type, op))
  811. continue;
  812. for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
  813. char *name = event_cache_name(type, op, i);
  814. if (event_glob != NULL && !strglobmatch(name, event_glob))
  815. continue;
  816. printf(" %-50s [%s]\n", name,
  817. event_type_descriptors[PERF_TYPE_HW_CACHE]);
  818. ++printed;
  819. }
  820. }
  821. }
  822. return printed;
  823. }
/*
 * Print the help text for the event symbols: the symbolic
 * hardware/software events, the cache events, and -- unless a glob
 * filter was given -- the raw/breakpoint syntax hints and all
 * tracepoints.
 */
void print_events(const char *event_glob)
{
	unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
	struct event_symbol *syms = event_symbols;
	char name[MAX_NAME_LEN];

	printf("\n");
	printf("List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		/* blank line between the hardware and software groups */
		type = syms->type;

		if (type != prev_type && printed) {
			printf("\n");
			printed = 0;
			ntypes_printed++;
		}

		if (event_glob != NULL &&
		    !(strglobmatch(syms->symbol, event_glob) ||
		      (syms->alias && strglobmatch(syms->alias, event_glob))))
			continue;

		if (strlen(syms->alias))
			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
		else
			strncpy(name, syms->symbol, MAX_NAME_LEN);

		printf(" %-50s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
		++printed;
	}

	if (ntypes_printed) {
		printed = 0;
		printf("\n");
	}
	print_hwcache_events(event_glob);

	/* with a filter, skip the static syntax examples below */
	if (event_glob != NULL)
		return;

	printf("\n");
	printf(" %-50s [%s]\n",
	       "rNNN",
	       event_type_descriptors[PERF_TYPE_RAW]);
	printf(" %-50s [%s]\n",
	       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
	       event_type_descriptors[PERF_TYPE_RAW]);
	printf("   (see 'perf list --help' on how to encode it)\n");
	printf("\n");

	printf(" %-50s [%s]\n",
	       "mem:<addr>[:access]",
	       event_type_descriptors[PERF_TYPE_BREAKPOINT]);
	printf("\n");

	print_tracepoint_events(NULL, NULL);
}
/* A term is "hardcoded" when it is not a free-form user term. */
int parse_events__is_hardcoded_term(struct parse_events__term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}
  880. static int new_term(struct parse_events__term **_term, int type_val,
  881. int type_term, char *config,
  882. char *str, long num)
  883. {
  884. struct parse_events__term *term;
  885. term = zalloc(sizeof(*term));
  886. if (!term)
  887. return -ENOMEM;
  888. INIT_LIST_HEAD(&term->list);
  889. term->type_val = type_val;
  890. term->type_term = type_term;
  891. term->config = config;
  892. switch (type_val) {
  893. case PARSE_EVENTS__TERM_TYPE_NUM:
  894. term->val.num = num;
  895. break;
  896. case PARSE_EVENTS__TERM_TYPE_STR:
  897. term->val.str = str;
  898. break;
  899. default:
  900. return -EINVAL;
  901. }
  902. *_term = term;
  903. return 0;
  904. }
/* Convenience wrapper: build a numeric-valued term. */
int parse_events__term_num(struct parse_events__term **term,
			   int type_term, char *config, long num)
{
	return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
			config, NULL, num);
}
/* Convenience wrapper: build a string-valued term. */
int parse_events__term_str(struct parse_events__term **term,
			   int type_term, char *config, char *str)
{
	return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
			config, str, 0);
}
  917. void parse_events__free_terms(struct list_head *terms)
  918. {
  919. struct parse_events__term *term, *h;
  920. list_for_each_entry_safe(term, h, terms, list)
  921. free(term);
  922. free(terms);
  923. }