/* evsel.c */

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "perf_regs.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
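
/*
 * Illustrative example (editor's note, not in the original source): for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME, three
 * of the PERF_SAMPLE_MASK bits are set, so the fixed part of each sample
 * is 3 * sizeof(u64) = 24 bytes.
 */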

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, ID);
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	evsel->leader = evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
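
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * allocating an evsel for the hardware cycles counter at list index 0:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */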

struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		/* never read past the allocated buffer after a short read */
		n = read(fd, bf + size, alloc_size - size);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';

	return r;
}
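
/*
 * Illustrative example (editor's note, not in the original source): with
 * exclude_kernel and exclude_hv set, exclude_user clear and precise_ip = 2,
 * a hardware "cycles" event is printed as "cycles:upp" - one character per
 * non-excluded context, then one 'p' per precision level.
 */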

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",	"L1-data", },
	{ "L1-icache",	"l1-i",		"l1i",	"L1-instruction", },
	{ "LLC",	"L2", },
	{ "dTLB",	"d-tlb",	"Data-TLB", },
	{ "iTLB",	"i-tlb",	"Instruction-TLB", },
	{ "branch",	"branches",	"bpu",	"btb",	"bpc", },
	{ "node", },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read", },
	{ "store",	"stores",	"write", },
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",	"access", },
	{ "misses",	"miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * Cache operation stats:
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >> 0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >> 8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
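
/*
 * Illustrative example (editor's note, not in the original source): the
 * cache config is packed as type | (op << 8) | (result << 16), mirroring
 * the decoding above. L1 data cache load misses would be encoded as:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which __perf_evsel__hw_cache_name() renders as "L1-dcache-load-misses".
 */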

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	/*
	 * We default some events to a period of 1, but keep that a weak
	 * assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	free(evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}

static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;

	return 0;
}
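
/*
 * Editor's note (illustrative, based on the perf_event_open ABI): when
 * scale is set the counter is assumed to have been opened with read_format
 * = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, so the
 * read() above fills struct perf_counts_values with three u64s:
 * { value, time_enabled, time_running }. The val * ena / run computation
 * then extrapolates the count of a time-multiplexed counter.
 */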

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto try_fallback;
			}
		}
	}

	return 0;

try_fallback:
	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.exclude_guest &&
	    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
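
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * opening an evsel and retrying with the software fallback (see
 * perf_evsel__fallback() below) when the hardware event is unavailable:
 *
 *	char msg[512];
 *	int err = perf_evsel__open(evsel, cpus, threads);
 *
 *	if (err < 0 && perf_evsel__fallback(evsel, -err, msg, sizeof(msg)))
 *		err = perf_evsel__open(evsel, cpus, threads);
 */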

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
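
/*
 * Editor's note (illustrative, based on the perf_event ABI): with
 * sample_id_all set, non-SAMPLE records carry a trailer holding the
 * selected id fields in the order TID, TIME, ID, STREAM_ID, CPU, which is
 * why the function above walks the array backwards from the record's end.
 */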

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					   - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array;
		}
	}

	return 0;
}
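
/*
 * Illustrative example (editor's note, not in the original source): for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD the
 * parser above consumes the record body as three consecutive u64s:
 *
 *	{ ip } { pid, tid } { period }
 *
 * i.e. exactly the order of the branches above, which mirrors the layout
 * the kernel uses for PERF_RECORD_SAMPLE.
 */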

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0; /* zero the unused half so we don't emit uninitialized bytes */
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}
}
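
/*
 * Usage sketch (editor's illustration; the tracepoint field names are
 * assumptions, not taken from this file): pulling fields out of a
 * sched:sched_switch sample:
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, &sample, "prev_pid");
 *	char *comm = perf_evsel__rawptr(evsel, &sample, "prev_comm");
 */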

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(config1);
		if_print(config2);
		if_print(size);
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(comm);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}

	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to the hrtimer based
		 * cpu-clock-tick sw counter, which is always available
		 * even when there is no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
			  "The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		free(evsel->name);
		evsel->name = NULL;
		return true;
	}

	return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel,
			      struct perf_target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
			 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
			 "'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
			 "No hardware sampling interrupt available.\n"
			 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	 "/bin/dmesg may provide additional information.\n"
	 "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror(err), perf_evsel__name(evsel));
}